path: root/Documentation/admin-guide/nfs
Mode        Name                       Size
-rw-r--r--  index.rst                  190
-rw-r--r--  nfs-client.rst             5462
-rw-r--r--  nfs-idmapper.rst           3361
-rw-r--r--  nfs-rdma.rst               9209
-rw-r--r--  nfsd-admin-interfaces.rst  1489
-rw-r--r--  nfsroot.rst                13177
-rw-r--r--  pnfs-block-server.rst      1802
-rw-r--r--  pnfs-scsi-server.rst       1235
gpu/Kconfig89
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile271
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ObjectID.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran.c469
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h)14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran_reg_init.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h2496
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c984
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h232
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c727
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c1124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c894
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h523
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c197
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c424
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c188
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c559
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c1123
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c687
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c835
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c384
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c714
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c686
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c1264
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h116
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c3239
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c634
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c974
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c233
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c307
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c304
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c968
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c673
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c591
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c1969
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h88
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c135
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c825
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c2201
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h)32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c372
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c8199
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_df.h64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c3157
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c1308
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.h58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c574
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h372
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c244
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h556
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c2843
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c240
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c241
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c440
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c804
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c408
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c145
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c413
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c1120
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c2443
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h643
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c1679
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h471
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c372
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c248
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c122
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c366
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c642
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h94
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c323
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h140
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_imu.h58
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c96
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c597
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c343
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c442
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h129
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c607
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h184
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c1592
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_lsdma.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_lsdma.h46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c630
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c770
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h506
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes_ctx.h122
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c373
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h259
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c1784
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h244
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c1659
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c680
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c319
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c145
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c143
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c4431
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h595
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c392
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.h34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c126
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c5492
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h1011
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c1564
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h194
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c354
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h176
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c676
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h466
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c576
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h127
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c550
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h363
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c371
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h (renamed from drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h)22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c611
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h204
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c187
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c262
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h)42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_socbb.h82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c311
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c239
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h336
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c3188
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h222
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c1354
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h458
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c588
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h194
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c550
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h246
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c1090
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h142
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c1009
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c749
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c543
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c1637
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h571
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c1547
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h379
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c659
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c3508
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h661
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c976
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c296
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c111
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c992
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h111
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c980
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h92
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c1107
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h217
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c1795
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h133
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h456
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c985
-rw-r--r--drivers/gpu/drm/amd/amdgpu/arct_reg_init.c60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v1_0.c105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v1_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_0.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_1.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_1.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v3_0.c139
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v3_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.c122
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c314
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.h41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_crtc.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c163
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_i2c.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c7047
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.h356
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_smc.c279
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c889
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_ih.c125
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c435
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cikd.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/clearstate_gfx10.h975
-rw-r--r--drivers/gpu/drm/amd/amdgpu/clearstate_gfx11.h997
-rw-r--r--drivers/gpu/drm/amd/amdgpu/clearstate_gfx12.h121
-rw-r--r--drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/clearstate_si.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_ih.c155
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c670
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c3872
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c1478
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c646
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c823
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v1_7.c130
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v1_7.h40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v3_6.c685
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v3_6.h41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_15.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_15.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_3.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_3.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_power_source.h)15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_6_2.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_6_2.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dimgrey_cavefish_reg_init.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/emu_soc.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c10247
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0_cleaner_shader.h91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_1_10_cleaner_shader.asm125
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_3_0_cleaner_shader.asm124
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c7537
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c110
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3_cleaner_shader.asm118
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_cleaner_shader.h56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c5793
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c2500
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c1982
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c3566
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c7355
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0_cleaner_shader.h68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c1019
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c1938
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2_cleaner_shader.asm153
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c5063
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.asm153
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3_cleaner_shader.h64
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v11_5_0.c516
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v11_5_0.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v12_0.c521
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v12_0.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c599
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c111
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c674
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c488
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c667
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c513
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c501
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c1154
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c1056
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c1043
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c629
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c731
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c954
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c2446
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c177
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_0.c214
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_2.c206
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v5_2.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v6_0.c144
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v6_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v7_0.c132
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v7_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_ih.c157
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.c817
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_0.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_1.c796
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v6_1.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.c787
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c393
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.h (renamed from drivers/gpu/drm/amd/amdgpu/dce_virtual.h)8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.c145
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0_3.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v12_0.c406
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v12_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.c191
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_0.h50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c377
-rw-r--r--drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.h49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c687
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.h43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c855
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c869
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c648
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c878
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.h35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c1486
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.h74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c872
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.h35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c733
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.h35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c1100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.h111
-rw-r--r--drivers/gpu/drm/amd/amdgpu/lsdma_v6_0.c121
-rw-r--r--drivers/gpu/drm/amd/amdgpu/lsdma_v6_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.c121
-rw-r--r--drivers/gpu/drm/amd/amdgpu/lsdma_v7_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mca_v3_0.c104
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mca_v3_0.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_userqueue.c485
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_userqueue.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c1754
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c1931
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c981
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c1371
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c871
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c718
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c629
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c664
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c597
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c570
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c746
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c647
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c1718
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h338
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v3_0.h132
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v4_0.h144
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v4_0_3.h37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmsch_v5_0.h144
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c365
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c586
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c729
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h4886
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.c554
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbif_v6_3_1.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c590
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c634
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.h34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c383
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c322
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_11.c400
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_11.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c443
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_2.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c827
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c369
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_7.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c694
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_9.h33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c1108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.h36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nvd.h678
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h343
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c174
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c663
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c186
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c291
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c977
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c355
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v14_0.c705
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v14_0.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c564
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.h21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_common.h42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c396
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c572
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c2356
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c274
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c2609
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.h (renamed from drivers/gpu/drm/amd/amdgpu/dce_v11_0.h)12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c2102
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c2112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c1916
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0_0_pkt_open.h5672
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c1858
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c1603
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c398
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_enums.h227
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_ih.c137
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sid.h1963
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c298
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sienna_cichlid.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c794
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c296
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v11_0.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v11_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c161
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v13_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v13_0_3.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v13_0_6.c (renamed from drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c)29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v13_0_6.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v14_0_2.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v9_0.c77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smuio_v9_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c1518
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h101
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h214
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15d.h302
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c1021
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc24.c601
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc24.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_rap_if.h84
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h223
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h175
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h177
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_ih.c199
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.c728
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v12_0.h105
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_0.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_0.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_1.c462
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_1.h52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_7.c528
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_7.h77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_10.c458
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_10.h75
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_14.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_14.h51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_7.c446
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_7.h51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.c434
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umsch_mm_v4_0.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c849
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v3_1.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c223
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c228
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c772
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c1826
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c119
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c290
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c636
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_sw_ring.c86
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_sw_ring.h44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c2209
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c2204
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c2208
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c2368
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c2315
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.h35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c2131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.h44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c1723
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.h35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c1442
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.h37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c1727
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.h39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c625
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c755
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.h30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c1302
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vid.h23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vpe_6_1_fw_if.h218
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vpe_v6_1.c398
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vpe_v6_1.h29
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig37
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile77
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c95
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_int.h8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cik_regs.h5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h4656
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm1434
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm1136
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm747
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm1135
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c3175
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c2470
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.h92
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c886
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h193
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c168
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.h294
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debug.c1152
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debug.h142
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c187
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c1710
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c3849
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h253
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c143
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c90
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v11.c90
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v12.c90
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c135
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c181
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c293
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c1326
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.h30
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c151
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c386
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c418
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c605
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c152
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c242
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h51
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c1079
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.h56
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c118
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c295
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h132
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c465
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c533
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c569
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c459
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c1034
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c431
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c768
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c507
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c318
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c96
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h333
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h661
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_aldebaran.h94
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_diq.h290
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_vi.h154
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h1379
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c2250
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c1018
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c408
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c411
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.h57
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c4271
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h276
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c2411
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h115
-rw-r--r--drivers/gpu/drm/amd/amdkfd/soc15_int.h51
-rw-r--r--drivers/gpu/drm/amd/amdxcp/Makefile25
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c152
-rw-r--r--drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.h30
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig59
-rw-r--r--drivers/gpu/drm/amd/display/Makefile66
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/Makefile59
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c13123
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h1114
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c1327
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c951
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h152
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c791
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h51
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c4275
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h37
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c827
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h90
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c1414
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c1000
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h104
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h50
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c1987
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h97
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c1911
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h68
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c808
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c277
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h43
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_quirks.c178
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c209
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h49
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c64
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h761
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c215
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.h36
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c112
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile83
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/Makefile38
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/bw_fixed.c188
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/calcs_logger.h578
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.c158
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/conversion.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/custom_float.c173
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dc_common.c102
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dc_common.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c3621
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c510
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/logger.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/vector.c306
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/Makefile57
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c2945
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c3807
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.c291
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_common.h (renamed from drivers/gpu/drm/amd/amdgpu/vi_dpm.h)15
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.c (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_feature.h)97
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_helper.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_interface.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal.h72
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser_types_internal2.h75
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.c2432
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table.h99
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c1087
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c399
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper.h70
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c283
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.h57
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper_struct.h66
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.c232
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce110/command_table_helper_dce110.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.c288
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper2_dce112.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.c286
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce112/command_table_helper_dce112.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.c250
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce60/command_table_helper_dce60.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.c250
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/dce80/command_table_helper_dce80.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile191
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c442
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c472
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.h59
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c302
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c237
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c153
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c166
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c348
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_clk.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c144
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv2_clk_mgr.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv2_clk_mgr.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c589
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h59
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c217
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c787
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h49
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c257
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h62
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c587
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.h98
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c338
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.h51
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_smu11_driver_if.h75
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c118
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c281
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.h164
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c769
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.h52
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c809
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.h)50
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c359
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.h271
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c1051
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c398
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.h110
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c737
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.h49
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.c366
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_smu.h130
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c680
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.h49
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.c344
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_smu.h139
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dalsmc.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c1233
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c308
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_smu13_driver_if.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c141
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c1556
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.h67
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c508
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.h220
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dalsmc.h55
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c1631
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h117
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c472
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_smu14_driver_if.h66
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c6380
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c318
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c1266
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c745
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c530
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c5620
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_sink.c100
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stat.c90
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_state.c1077
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c1232
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c312
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h2754
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h190
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h198
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c2250
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h334
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h1531
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h120
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_edid_parser.c80
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_edid_parser.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_fused_io.c148
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_fused_io.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h133
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c762
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h1160
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_plane.h51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_plane_priv.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.c229
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stat.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_state.h77
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_state_priv.h128
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h607
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream_priv.h75
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_trace.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h1410
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/Makefile103
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.c176
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h464
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn201/dcn201_dccg.c85
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn201/dcn201_dccg.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c132
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn30/dcn30_dccg.c103
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn30/dcn30_dccg.h66
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h59
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn302/dcn302_dccg.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn303/dcn303_dccg.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.c755
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h239
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.c406
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h212
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.c375
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn32/dcn32_dccg.h125
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c2470
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.h247
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c914
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h249
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c306
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h392
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c1375
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.h182
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c970
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.h321
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c967
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c1873
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h331
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c1192
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h331
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c.c83
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c756
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h355
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.c496
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_sw.h57
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c311
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_ipp.h287
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c1816
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h320
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c1006
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h467
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.c768
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_opp.h358
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c294
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h129
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c1445
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c1569
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.h735
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.c1689
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_transform.h699
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c245
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c318
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c88
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.h (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.h)12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c510
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h60
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c457
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/Makefile35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c458
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.h79
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c1041
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c737
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_regamma_v.c555
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_v.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c2362
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.h294
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c707
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c717
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.h58
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/Makefile32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.c847
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_compressor.h78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/Makefile33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c1284
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/Makefile35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.c268
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce60/dce60_timing_generator.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/Makefile35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.c250
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_timing_generator.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c683
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.h120
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.c129
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dwb.h267
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c622
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.c90
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h223
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/Makefile8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.c343
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb.h431
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c876
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c99
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h77
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/Makefile9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c208
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h71
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c125
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.h86
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/Makefile8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c443
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h100
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/Makefile33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c215
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h171
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_cm_common.c456
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.c239
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mmhubbub.h448
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_opp.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c264
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h157
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.c219
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_panel_cntl.h97
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/Makefile13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h126
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c119
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h111
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c200
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h163
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/Makefile117
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.c1474
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_link_encoder.h659
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c1627
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.h749
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_link_encoder.c502
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_link_encoder.h366
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c661
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.h119
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_link_encoder.c257
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_link_encoder.h86
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.c899
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn30/dcn30_dio_stream_encoder.h331
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.c191
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn301/dcn301_dio_link_encoder.h82
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.c682
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn31/dcn31_dio_link_encoder.h290
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c506
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.h355
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_link_encoder.c328
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_link_encoder.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c493
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.h206
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.c190
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn321/dcn321_dio_link_encoder.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.c391
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_link_encoder.h188
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c520
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.h332
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_link_encoder.c322
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_link_encoder.h134
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c856
-rw-r--r--drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.h240
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_cp_psp.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_event_log.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h229
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h321
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h317
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h304
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile143
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c1933
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c145
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c1619
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dc_features.h566
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.c159
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn10/dcn10_fpu.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c2559
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h89
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c5115
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c5231
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c1665
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c1666
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c6150
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c1786
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c739
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c6626
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.h54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c1778
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h69
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c486
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c367
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.c380
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn303/dcn303_fpu.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c827
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h59
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c7228
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c1591
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h69
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c432
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c7343
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c1678
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.h70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c3613
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h80
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c3762
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c6350
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h1170
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c614
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.h70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c931
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c620
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c639
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h311
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c320
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h109
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h741
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c1146
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h1261
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c382
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c1922
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h66
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h160
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_logger.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h704
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c260
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h90
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/Makefile141
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/cmntypes.h94
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c10335
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.h204
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h2033
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_lib_defines.h79
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.c798
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/display_mode_util.h76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c929
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c516
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c470
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h135
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/bounding_boxes/dcn4_soc_bb.h373
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml2_external_lib_deps.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h190
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h509
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_policy_types.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h210
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h737
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.c660
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c13315
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_factory.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h2326
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c786
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c785
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.c50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.c198
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_factory.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c706
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c2371
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.c83
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.c147
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.c49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_legacy.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.c1170
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h189
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h988
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c1174
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.h52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h157
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c910
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.h52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.c311
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_policy.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c1525
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c560
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h149
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c704
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h309
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml_assert.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml_depedencies.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.c573
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml_display_rq_dlg_calc.h63
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml_logging.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/Makefile83
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c598
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.h1530
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_cm.c882
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp_dscl.c696
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.c491
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp.h788
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn20/dcn20_dpp_cm.c1202
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.c324
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn201/dcn201_dpp.h83
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c1526
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.h645
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c461
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.c238
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn32/dcn32_dpp.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.c149
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn35/dcn35_dpp.h67
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c428
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h740
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c229
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c1186
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/Makefile39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c1459
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c768
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h618
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c112
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h59
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c393
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h346
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dsc.h114
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h55
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c64
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c122
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/Makefile46
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_cm_common.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.c272
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb.h919
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn30/dcn30_dwb_cm.c395
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/dwb/dcn35/dcn35_dwb.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/Makefile134
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c180
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.c387
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce110/hw_translate_dce110.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c198
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c409
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.c175
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.c411
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce60/hw_translate_dce60.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c175
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.c411
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dce80/hw_translate_dce80.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c231
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c409
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c261
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c383
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c240
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c373
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c269
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c388
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_factory_dcn315.c260
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_factory_dcn315.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_translate_dcn315.c374
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn315/hw_translate_dcn315.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c271
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_translate_dcn32.c349
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_translate_dcn32.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.c264
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_factory_dcn401.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_translate_dcn401.c335
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn401/hw_translate_dcn401.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h201
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/generic_regs.h66
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c352
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_regs.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c675
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/gpio_service.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hpd_regs.h79
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c250
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.h49
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c125
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.h77
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_generic.c129
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_generic.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.c203
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_gpio.h144
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.c149
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_hpd.h49
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c126
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/Makefile28
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c425
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/Makefile50
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.c628
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_link_encoder.h235
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c780
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.h245
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.c89
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn32/dcn32_hpo_dp_link_encoder.h68
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/Makefile104
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.c946
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn10/dcn10_hubbub.h518
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.c692
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn20/dcn20_hubbub.h144
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn201/dcn201_hubbub.c107
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn201/dcn201_hubbub.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.c723
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn21/dcn21_hubbub.h158
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c508
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.h142
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn301/dcn301_hubbub.c84
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn301/dcn301_hubbub.h60
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c1096
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.h147
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c1063
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.h166
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c615
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.h171
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c1272
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.h206
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/Makefile97
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.c1420
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h825
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.c1718
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn20/dcn20_hubp.h416
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.c156
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn201/dcn201_hubp.h132
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.c864
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn21/dcn21_hubp.h134
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c583
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.h310
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.c136
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn31/dcn31_hubp.h253
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c232
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.h70
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.c246
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn35/dcn35_hubp.h76
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c1097
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h373
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/Makefile202
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.c219
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h1303
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.c172
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce100/dce100_hwseq.h54
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c3421
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h125
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce112/dce112_hwseq.c160
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce112/dce112_hwseq.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.c270
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce120/dce120_hwseq.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.c433
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce60/dce60_hwseq.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce80/dce80_hwseq.c55
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce80/dce80_hwseq.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c4144
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h221
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c127
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c3247
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h174
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c146
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c615
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c136
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c302
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.h59
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c151
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c1278
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.h106
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c156
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_hwseq.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_hwseq.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c152
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_hwseq.c223
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_hwseq.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.c (renamed from drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c)54
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_hwseq.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c716
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.h64
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c158
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c602
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.h52
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c165
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c1847
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h135
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c174
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c1594
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h104
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c178
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile28
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.c182
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_hwseq.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c172
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c2671
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h113
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.c153
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_init.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h591
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h206
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/bw_fixed.h166
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/clock_source.h191
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/compressor.h137
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h70
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h707
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/custom_float.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h489
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calc_math.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h649
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h71
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/audio.h66
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h187
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h371
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h474
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/cursor_reg_cache.h99
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h219
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h264
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h96
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h359
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h240
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/gpio.h96
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h313
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h364
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h114
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h286
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mcif_wb.h109
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h194
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h1114
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h373
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/optc.h197
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h72
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h55
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h365
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h386
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h293
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/vmid.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/vpg.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h120
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h89
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_service.h350
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/reg_helper.h558
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h662
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/soc_and_ip_translator.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/vm_helper.h48
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile200
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c430
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c268
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.c366
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce60/irq_service_dce60.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c278
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c380
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c385
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c339
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c413
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c422
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c387
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c283
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.c404
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn31/irq_service_dcn31.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.c406
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn314/irq_service_dcn314.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.c411
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn315/irq_service_dcn315.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.c436
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn32/irq_service_dcn32.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.c400
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn35/irq_service_dcn35.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.c382
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn351/irq_service_dcn351.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.c381
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn36/irq_service_dcn36.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.c414
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn401/irq_service_dcn401.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c243
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.h93
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq_types.h237
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/Makefile64
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c1018
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c172
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h63
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c348
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h63
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c199
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c176
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c228
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h62
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c233
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c1473
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c2662
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.h53
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c864
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c62
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_resource.c114
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_resource.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c626
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c603
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h106
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c2551
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h117
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c171
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c442
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h112
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c546
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c210
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h60
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c1816
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h201
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c267
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h43
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c491
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h62
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c80
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c1046
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h60
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c553
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c250
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c1299
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h81
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c240
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h54
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/Makefile54
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.c324
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn20/dcn20_mmhubbub.h517
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn32/dcn32_mmhubbub.c239
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn32/dcn32_mmhubbub.h211
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn35/dcn35_mmhubbub.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/mmhubbub/dcn35/dcn35_mmhubbub.h75
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/Makefile72
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c521
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.h200
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.c610
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn20/dcn20_mpc.h312
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.c1576
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h1105
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c1051
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h402
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c634
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h257
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/Makefile51
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.c413
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn10/dcn10_opp.h191
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.c415
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn20/dcn20_opp.h174
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/opp/dcn35/dcn35_opp.h67
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/Makefile114
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c1683
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h650
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c580
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h128
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c198
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h71
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c438
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h368
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c190
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c395
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h277
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c274
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h262
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c383
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h195
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c537
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h83
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.c551
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn401/dcn401_optc.h196
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h94
-rw-r--r--drivers/gpu/drm/amd/display/dc/pg/Makefile35
-rw-r--r--drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.c573
-rw-r--r--drivers/gpu/drm/amd/display/dc/pg/dcn35/dcn35_pg_cntl.h195
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/Makefile225
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c1196
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h63
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c1552
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.h54
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c1432
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h57
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c1305
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.h39
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c1478
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c1490
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.h47
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c1690
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h57
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c2779
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h171
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c1316
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c1716
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c2632
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h108
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c1738
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c1522
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c1454
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c2269
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h106
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c2155
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c2176
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c2044
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.h44
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c2926
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h1281
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource_helpers.c780
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c2074
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c2219
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h315
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c2192
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c2192
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c2278
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h655
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile19
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c304
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/Makefile33
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c1910
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl.h27
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_filters.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.c553
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_isharp_filters.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.c2586
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_easf_filters.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.c1233
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_scl_filters.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h559
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.c151
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_custom_float.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_debug.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.c493
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_fixpt31_32.h522
-rw-r--r--drivers/gpu/drm/amd/display/dc/sspl/spl_os_types.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/Makefile30
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c132
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_hwss.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c181
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.h39
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h1071
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv_stat.h41
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h6770
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_trace_buffer.h68
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/Makefile34
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c479
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h252
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c56
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h35
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c202
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.h50
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn301.c58
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn301.h37
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn302.c58
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn302.h37
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.c59
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn303.h38
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c495
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h258
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.c67
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn314.h35
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn315.c62
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn315.h68
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn316.c62
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn316.h33
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c548
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h273
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c571
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.h288
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.c34
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn351.h13
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.c34
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn36.h13
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c667
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.h290
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c109
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h123
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c1370
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c128
-rw-r--r--drivers/gpu/drm/amd/display/include/audio_types.h121
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_interface.h44
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h349
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h314
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h71
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h134
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h186
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h540
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_interface.h109
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_service_interface.h124
-rw-r--r--drivers/gpu/drm/amd/display/include/gpio_types.h332
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h442
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_defs.h196
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h313
-rw-r--r--drivers/gpu/drm/amd/display/include/hdcp_msg_types.h108
-rw-r--r--drivers/gpu/drm/amd/display/include/irq_service_interface.h51
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h263
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h147
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h83
-rw-r--r--drivers/gpu/drm/amd/display/include/set_mode_types.h102
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h156
-rw-r--r--drivers/gpu/drm/amd/display/include/vector.h150
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/Makefile31
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c2014
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.h118
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_table.c64
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_table.h47
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/luts_1d.h51
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/Makefile31
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c1283
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/Makefile33
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c599
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h585
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c535
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c322
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c913
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c701
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c763
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c361
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h134
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c1031
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h532
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h146
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h360
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h80
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_shared.h108
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_stats.h75
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_vmid.h46
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/Makefile31
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c579
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/Makefile31
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c1046
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h84
-rw-r--r--drivers/gpu/drm/amd/display/modules/vmid/vmid.c174
-rw-r--r--drivers/gpu/drm/amd/include/aldebaran_ip_offset.h1738
-rw-r--r--drivers/gpu/drm/amd/include/amd_acpi.h195
-rw-r--r--drivers/gpu/drm/amd/include/amd_cper.h269
-rw-r--r--drivers/gpu/drm/amd/include/amd_pcie.h22
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h553
-rw-r--r--drivers/gpu/drm/amd/include/amdgpu_reg_state.h153
-rw-r--r--drivers/gpu/drm/amd/include/arct_ip_offset.h1648
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_0_offset.h453
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_0_sh_mask.h2045
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_8_0_offset.h411
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_1_8_0_sh_mask.h1807
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_2_0_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_default.h)53
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_2_0_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_offset.h)379
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_2_0_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/ATHUB/athub_1_0_sh_mask.h)273
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_2_1_0_offset.h523
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_2_1_0_sh_mask.h2378
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_3_0_0_offset.h259
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_3_0_0_sh_mask.h1246
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_offset.h287
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/athub/athub_4_1_0_sh_mask.h1348
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/clk/clk_10_0_2_offset.h56
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/clk/clk_10_0_2_sh_mask.h73
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_0_offset.h33
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_0_sh_mask.h38
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_offset.h32
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_sh_mask.h37
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_5_0_offset.h50
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_5_0_sh_mask.h70
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_10_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_0_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_11_2_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_offset.h)16
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_sh_mask.h)162
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_d.h83
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_6_0_sh_mask.h112
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dce/dce_8_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h14114
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h54345
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_offset.h17539
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_0_sh_mask.h68033
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_offset.h6193
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_1_sh_mask.h22091
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h13875
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_sh_mask.h56648
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_offset.h18022
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_0_sh_mask.h71029
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_1_offset.h13271
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_1_sh_mask.h53361
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_offset.h16273
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_2_sh_mask.h62433
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_offset.h8471
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_0_3_sh_mask.h35366
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h15089
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h60782
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_4_offset.h15245
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_4_sh_mask.h61832
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_offset.h15195
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_5_sh_mask.h62071
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_offset.h15686
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_6_sh_mask.h62727
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_offset.h14737
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_0_sh_mask.h222948
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_offset.h14596
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_2_1_sh_mask.h56598
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_offset.h15279
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_0_sh_mask.h53485
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_1_offset.h15259
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_5_1_sh_mask.h53464
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_offset.h15485
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_6_0_sh_mask.h61940
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_offset.h16662
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_1_0_sh_mask.h145870
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h26
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h37
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h52
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h26
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h85
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h197
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_4_15_offset.h28
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_4_15_sh_mask.h28
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h30
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h157
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h647
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h3912
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_offset.h151
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_sh_mask.h952
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h565
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h3430
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_offset.h604
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_0_sh_mask.h3574
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_offset.h204
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_0_3_sh_mask.h1194
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_1_4_offset.h7215
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_3_1_4_sh_mask.h55194
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_offset.h11973
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_0_sh_mask.h103385
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_2_offset.h11957
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_2_sh_mask.h103633
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_3_offset.h11969
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_4_2_3_sh_mask.h136141
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_default.h6028
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_offset.h11375
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h44165
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_default.h7275
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h13609
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h49369
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_default.h6114
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_offset.h11685
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_0_sh_mask.h41664
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h12094
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h44690
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_offset.h10002
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_5_0_sh_mask.h36579
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_12_0_0_offset.h11061
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_12_0_0_sh_mask.h40550
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/GC/gc_9_0_default.h)7
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/GC/gc_9_0_offset.h)83
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/GC/gc_9_0_sh_mask.h)263
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_offset.h7483
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_1_sh_mask.h31176
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_offset.h7503
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_2_1_sh_mask.h31186
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h266
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h764
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_2_offset.h7687
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_2_sh_mask.h33003
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_offset.h7450
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_3_sh_mask.h31649
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_6_0_d.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_0_offset.h209
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_0_sh_mask.h603
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_offset.h219
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_4_4_2_sh_mask.h663
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_5_0_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_offset.h)24
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_5_0_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_sh_mask.h)88
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_5_2_1_offset.h217
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_5_2_1_sh_mask.h684
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_6_0_0_offset.h209
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_6_0_0_sh_mask.h646
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_offset.h219
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/hdp/hdp_7_0_0_sh_mask.h735
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_6_0_0_offset.h391
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_6_0_0_sh_mask.h1439
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_offset.h388
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_0_0_sh_mask.h1411
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/MMHUB/mmhub_1_0_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/MMHUB/mmhub_1_0_offset.h)39
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/MMHUB/mmhub_1_0_sh_mask.h)122
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_7_offset.h5125
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_7_sh_mask.h32178
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_offset.h3366
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_1_8_0_sh_mask.h22628
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_2_0_0_default.h927
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_2_0_0_offset.h1799
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_2_0_0_sh_mask.h7567
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_2_3_0_default.h1253
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_2_3_0_offset.h2439
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_2_3_0_sh_mask.h10331
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_3_0_0_offset.h1529
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_3_0_0_sh_mask.h7478
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_3_0_1_offset.h1769
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_3_0_1_sh_mask.h7483
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_3_0_2_offset.h1425
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_3_0_2_sh_mask.h7228
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_3_3_0_offset.h1395
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_3_3_0_sh_mask.h6722
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_4_1_0_offset.h1341
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_4_1_0_sh_mask.h6943
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_1_offset.h1999
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_1_sh_mask.h9790
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_3_0_offset.h1991
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_3_0_sh_mask.h10265
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_default.h3933
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_offset.h7789
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h45068
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_10_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_default.h)164
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_10_0_offset.h336
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_10_0_sh_mask.h886
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_8_offset.h352
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_8_sh_mask.h355
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_offset.h)34
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/MP/mp_9_0_sh_mask.h)1082
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_5_0_offset.h400
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_5_0_sh_mask.h942
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_12_0_0_offset.h336
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_12_0_0_sh_mask.h866
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_0_offset.h461
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_0_sh_mask.h682
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_2_offset.h409
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_2_sh_mask.h631
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_4_offset.h402
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_4_sh_mask.h595
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_5_offset.h455
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_5_sh_mask.h672
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_offset.h456
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_6_sh_mask.h702
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_8_offset.h410
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_8_sh_mask.h603
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_0_offset.h359
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_0_sh_mask.h534
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_offset.h468
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_14_0_2_sh_mask.h692
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_9_0_offset.h375
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_9_0_sh_mask.h1463
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_offset.h)2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_3_1_offset.h11287
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_3_1_sh_mask.h32806
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_2_3_default.h18521
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_2_3_offset.h14663
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_2_3_sh_mask.h120339
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_4_3_0_offset.h17381
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_4_3_0_sh_mask.h82050
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/NBIO/nbio_6_1_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/NBIO/nbio_6_1_offset.h)2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/NBIO/nbio_6_1_sh_mask.h)4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_6_1_smn.h61
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_default.h14865
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h4642
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_sh_mask.h118975
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_smn.h63
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_offset.h9406
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_11_0_sh_mask.h57899
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_offset.h31871
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_2_0_sh_mask.h152483
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h68
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h4631
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h48482
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_offset.h29660
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_7_0_sh_mask.h154426
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_offset.h10004
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_9_0_sh_mask.h38900
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_d.h23
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/oss_1_0_sh_mask.h41
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_1_offset.h337
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_1_sh_mask.h1249
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_sh_mask.h)8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_offset.h351
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_2_0_sh_mask.h1311
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_4_2_offset.h263
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_4_2_sh_mask.h995
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_5_0_0_offset.h353
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_5_0_0_sh_mask.h1305
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_offset.h267
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_0_0_sh_mask.h979
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_offset.h279
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_6_1_0_sh_mask.h1019
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_offset.h279
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/oss/osssys_7_0_0_sh_mask.h1029
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/pcie/pcie_6_1_0_offset.h630
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/pcie/pcie_6_1_0_sh_mask.h4250
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h27
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h30
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_offset.h27
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/rsmu/rsmu_0_0_2_sh_mask.h32
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_0_offset.h5224
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_0_sh_mask.h13922
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_offset.h1113
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma/sdma_4_4_2_sh_mask.h3300
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_default.h286
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_offset.h547
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_0_sh_mask.h1852
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_default.h)54
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_offset.h)92
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_1_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA0/sdma0_4_0_sh_mask.h)206
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_offset.h1051
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_2_sh_mask.h3002
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_offset.h1047
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma0/sdma0_4_2_sh_mask.h2992
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_default.h282
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_offset.h539
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_0_sh_mask.h1810
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_offset.h1043
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_2_sh_mask.h2956
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_offset.h)516
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma1/sdma1_4_2_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_sh_mask.h)1246
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_offset.h1043
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma2/sdma2_4_2_2_sh_mask.h2956
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_offset.h1043
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma3/sdma3_4_2_2_sh_mask.h2956
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_offset.h1043
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma4/sdma4_4_2_2_sh_mask.h2956
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_offset.h1043
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma5/sdma5_4_2_2_sh_mask.h2956
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_offset.h1043
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma6/sdma6_4_2_2_sh_mask.h2956
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_offset.h1043
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/sdma7/sdma7_4_2_2_sh_mask.h2956
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_d.h44
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_6_0_sh_mask.h188
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_offset.h102
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_10_0_2_sh_mask.h184
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h507
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h1096
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_offset.h35
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_6_sh_mask.h41
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h30
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h33
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_2_offset.h516
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_2_sh_mask.h1163
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_offset.h177
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_3_sh_mask.h428
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_6_offset.h517
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_13_0_6_sh_mask.h1178
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_offset.h511
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_14_0_2_sh_mask.h1106
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_offset.h)6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_sh_mask.h)6
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_10_0_default.h141
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_10_0_offset.h257
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_10_0_sh_mask.h885
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h58
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h99
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_13_0_2_offset.h346
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_13_0_2_sh_mask.h1297
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_14_0_2_offset.h228
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_14_0_2_sh_mask.h940
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_9_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/THM/thm_9_0_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_9_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/THM/thm_9_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/thm/thm_9_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/THM/thm_9_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_12_0_0_offset.h33
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_12_0_0_sh_mask.h95
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_default.h31
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_offset.h52
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_0_sh_mask.h36
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_offset.h33
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_1_sh_mask.h91
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h33
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_sh_mask.h91
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_7_0_offset.h2626
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_7_0_sh_mask.h10796
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_offset.h35
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_10_0_sh_mask.h97
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_offset.h29
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_14_0_sh_mask.h37
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_offset.h33
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/umc/umc_8_7_0_sh_mask.h100
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_3_1_d.h98
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_3_1_sh_mask.h804
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_4_0_sh_mask.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h15
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_offset.h)3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_7_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_sh_mask.h)20
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_4_0_default.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/VCE/vce_4_0_default.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_4_0_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/VCE/vce_4_0_offset.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vce/vce_4_0_sh_mask.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/VCE/vce_4_0_sh_mask.h)0
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h422
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h1358
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_0_0_offset.h1008
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_0_0_sh_mask.h3815
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h1005
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_sh_mask.h3660
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_6_0_offset.h1462
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_6_0_sh_mask.h4535
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_3_0_0_offset.h1542
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_3_0_0_sh_mask.h5530
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_0_offset.h2032
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_0_sh_mask.h8937
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_offset.h2367
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_3_sh_mask.h10919
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_5_offset.h1797
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_4_0_5_sh_mask.h8614
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_offset.h1694
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_5_0_0_sh_mask.h7666
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/DC/dce_12_0_default.h9868
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/HDP/hdp_4_0_default.h117
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/NBIF/nbif_6_1_default.h1271
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/OSSSYS/osssys_4_0_default.h176
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SDMA1/sdma1_4_0_default.h282
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/SMUIO/smuio_9_0_default.h100
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vega10/UVD/uvd_7_0_default.h127
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vpe/vpe_6_1_0_offset.h1553
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vpe/vpe_6_1_0_sh_mask.h4393
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h69
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h29
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h69
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h29
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_6_1_0_sh_mask.h87
-rw-r--r--drivers/gpu/drm/amd/include/atom-bits.h2
-rw-r--r--drivers/gpu/drm/amd/include/atombios.h92
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h2429
-rw-r--r--drivers/gpu/drm/amd/include/beige_goby_ip_offset.h1272
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h392
-rw-r--r--drivers/gpu/drm/amd/include/cgs_linux.h119
-rw-r--r--drivers/gpu/drm/amd/include/cik_structs.h3
-rw-r--r--drivers/gpu/drm/amd/include/cyan_skillfish_ip_offset.h712
-rw-r--r--drivers/gpu/drm/amd/include/dimgrey_cavefish_ip_offset.h1047
-rw-r--r--drivers/gpu/drm/amd/include/discovery.h449
-rw-r--r--drivers/gpu/drm/amd/include/dm_pp_interface.h179
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/dcn/irqsrcs_dcn_1_0.h1138
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_10_1.h53
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h80
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_12_0_0.h74
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_9_0.h55
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/isp/irqsrcs_isp_4_1.h62
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/ivsrcid_vislands30.h98
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h42
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_4_0.h50
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/sdma0/irqsrcs_sdma0_5_0.h43
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_4_0.h50
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/sdma1/irqsrcs_sdma1_5_0.h44
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/sdma2/irqsrcs_sdma2_5_0.h45
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/sdma3/irqsrcs_sdma3_5_0.h45
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/smuio/irqsrcs_smuio_9_0.h32
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/thm/irqsrcs_thm_9_0.h33
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/uvd/irqsrcs_uvd_7_0.h34
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vce/irqsrcs_vce_4_0.h34
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_1_0.h34
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_2_0.h36
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_4_0.h44
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vcn/irqsrcs_vcn_5_0.h47
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vmc/irqsrcs_vmc_1_0.h37
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/vpe/irqsrcs_vpe_6_1.h40
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h343
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h1706
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h708
-rw-r--r--drivers/gpu/drm/amd/include/mes_v12_api_def.h906
-rw-r--r--drivers/gpu/drm/amd/include/navi10_enum.h22764
-rw-r--r--drivers/gpu/drm/amd/include/navi10_ip_offset.h855
-rw-r--r--drivers/gpu/drm/amd/include/navi12_ip_offset.h1117
-rw-r--r--drivers/gpu/drm/amd/include/navi14_ip_offset.h1117
-rw-r--r--drivers/gpu/drm/amd/include/pptable.h146
-rw-r--r--drivers/gpu/drm/amd/include/renoir_ip_offset.h1396
-rw-r--r--drivers/gpu/drm/amd/include/sienna_cichlid_ip_offset.h1166
-rw-r--r--drivers/gpu/drm/amd/include/soc15_hw_ip.h104
-rw-r--r--drivers/gpu/drm/amd/include/soc15_ih_clientid.h113
-rw-r--r--drivers/gpu/drm/amd/include/soc21_enum.h22477
-rw-r--r--drivers/gpu/drm/amd/include/soc24_enum.h21073
-rw-r--r--drivers/gpu/drm/amd/include/umsch_mm_4_0_api_def.h446
-rw-r--r--drivers/gpu/drm/amd/include/v10_structs.h1257
-rw-r--r--drivers/gpu/drm/amd/include/v11_structs.h1189
-rw-r--r--drivers/gpu/drm/amd/include/v12_structs.h1189
-rw-r--r--drivers/gpu/drm/amd/include/v9_structs.h93
-rw-r--r--drivers/gpu/drm/amd/include/vangogh_ip_offset.h1514
-rw-r--r--drivers/gpu/drm/amd/include/vega10_enum.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/vega10_enum.h)1
-rw-r--r--drivers/gpu/drm/amd/include/vega10_ip_offset.h (renamed from drivers/gpu/drm/amd/include/asic_reg/vega10/soc15ip.h)374
-rw-r--r--drivers/gpu/drm/amd/include/vega20_ip_offset.h1049
-rw-r--r--drivers/gpu/drm/amd/include/vi_structs.h124
-rw-r--r--drivers/gpu/drm/amd/include/yellow_carp_offset.h1365
-rw-r--r--drivers/gpu/drm/amd/pm/Makefile51
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c2141
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c95
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c5046
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h618
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h28
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_pm.h128
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/Makefile32
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h (renamed from drivers/gpu/drm/amd/amdgpu/cik_dpm.h)3
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c (renamed from drivers/gpu/drm/amd/amdgpu/kv_dpm.c)425
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.h (renamed from drivers/gpu/drm/amd/amdgpu/kv_dpm.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_smc.c (renamed from drivers/gpu/drm/amd/amdgpu/kv_smc.c)1
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c (renamed from drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c)652
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h37
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/ppsmc.h (renamed from drivers/gpu/drm/amd/amdgpu/ppsmc.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h (renamed from drivers/gpu/drm/amd/amdgpu/r600_dpm.h)3
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c (renamed from drivers/gpu/drm/amd/amdgpu/si_dpm.c)1204
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h (renamed from drivers/gpu/drm/amd/amdgpu/si_dpm.h)19
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c (renamed from drivers/gpu/drm/amd/amdgpu/si_smc.c)68
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h (renamed from drivers/gpu/drm/amd/amdgpu/sislands_smc.h)107
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/Makefile35
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c1633
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/Makefile44
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.c189
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ci_baco.h29
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.c120
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/common_baco.h63
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.c188
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/fiji_baco.h29
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c)283
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr.c566
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/hwmgr_ppt.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h)8
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.c212
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/polaris_baco.h29
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_overdriver.c1288
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_overdriver.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_asicblocks.h)35
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c308
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.h40
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c)924
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomctrl.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h)60
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.c632
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h)95
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pppcielanes.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.c)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pppcielanes.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/pppcielanes.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/pptable_v1_0.h)101
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c)561
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c)425
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/processpptables.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c1692
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h325
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_inc.h43
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.c90
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_baco.h32
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c)176
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_clockpowergating.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h)9
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_dyn_defaults.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_dyn_defaults.h)2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c)2806
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h)67
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c)468
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_powertune.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c)338
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h)9
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c2079
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.h)82
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.c65
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu9_baco.h31
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c769
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.h241
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.c221
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/tonga_baco.h29
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.c117
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_baco.h29
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c)3701
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h)58
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_inc.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h)23
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.c1366
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_powertune.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h)16
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h)108
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c)359
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h)1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c657
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.h (renamed from drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h)19
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.c115
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_baco.h29
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c2994
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.h457
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_inc.h40
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_pptable.h108
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c397
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.h58
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c310
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.h66
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.c120
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_baco.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_instance.h)28
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c4456
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.h588
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_inc.h36
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_powertune.c72
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_powertune.h32
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_pptable.h139
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c395
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.h31
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c356
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.h71
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/amd_powerplay.h35
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/cz_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/cz_ppsmc.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/fiji_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/fiji_ppsmc.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h (renamed from drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h)59
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h (renamed from drivers/gpu/drm/amd/powerplay/inc/hwmgr.h)471
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/polaris10_pwrvirus.h1793
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h (renamed from drivers/gpu/drm/amd/powerplay/inc/power_state.h)12
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/pp_debug.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_debug.h)8
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/pp_endian.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_endian.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h (renamed from drivers/gpu/drm/amd/powerplay/eventmgr/psm.h)28
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/ppinterrupt.h (renamed from drivers/gpu/drm/amd/powerplay/inc/ppinterrupt.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/rv_ppsmc.h96
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu10.h188
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu10_driver_if.h117
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h895
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu7.h)25
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu71.h)22
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu71_discrete.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu71_discrete.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu72.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu72.h)23
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu72_discrete.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu72_discrete.h)23
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu73.h)45
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu73_discrete.h)73
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu74.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu74.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu74_discrete.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h)3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h760
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu75_discrete.h886
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu7_common.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu7_common.h)4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu7_discrete.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu7_discrete.h)3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu7_fusion.h)42
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu7_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu7_ppsmc.h)11
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu8.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu8.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu8_fusion.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu8_fusion.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu9.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu9.h)17
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h)69
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_cz.h)2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_vi.h (renamed from drivers/gpu/drm/amd/powerplay/inc/smu_ucode_xfer_vi.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smumgr.h118
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/tonga_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/tonga_ppsmc.h)0
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/vega10_ppsmc.h (renamed from drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h)12
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h769
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/vega12_ppsmc.h123
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/vega20_ppsmc.h131
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/Makefile33
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c2990
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.h76
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c)879
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/fiji_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.h)18
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c)556
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h)3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c)1178
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/polaris10_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.h)12
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c322
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.h50
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c568
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h)50
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.c910
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu8_smumgr.h99
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c174
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.h32
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smumgr.c255
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c (renamed from drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c)836
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.h)23
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c399
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h)43
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.c413
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega12_smumgr.h57
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c644
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h)59
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.c2300
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/vegam_smumgr.h75
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/Makefile36
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c4154
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h1793
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/aldebaran_ppsmc.h131
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h134
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h933
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_cyan_skillfish.h79
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h1222
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h1835
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h284
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu12_driver_if.h232
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h557
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h1632
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h282
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h139
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_6.h243
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h1622
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h222
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0.h1889
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h263
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h143
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_ppsmc.h141
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_pmfw.h123
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h122
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_pmfw.h152
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_ppsmc.h77
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v12_0_ppsmc.h106
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h150
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h373
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_ppsmc.h148
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_pmfw.h141
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_ppsmc.h97
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_pmfw.h137
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h137
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_5_pmfw.h126
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_5_ppsmc.h74
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_pmfw.h471
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_6_ppsmc.h123
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h140
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_pmfw.h194
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_0_ppsmc.h141
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v14_0_2_ppsmc.h150
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h192
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h478
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h307
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h199
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h167
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h66
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h302
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_0_pptable.h198
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h198
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h169
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h249
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0_2_pptable.h204
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/Makefile35
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c2027
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.h72
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c605
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.h29
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c3617
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.h54
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c3229
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h (renamed from drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.h)36
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c2182
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c2557
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h59
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/Makefile31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c1505
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.h34
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c408
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c2158
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.h72
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c2510
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c3301
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.h28
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c936
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c1147
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.h28
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c1139
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.h29
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c3942
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h102
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c2887
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.h28
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c1366
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h28
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/Makefile30
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c1977
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c1722
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.h28
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c2931
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.h28
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c1197
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h206
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_internal.h105
-rw-r--r--drivers/gpu/drm/amd/powerplay/Makefile22
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c1479
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/Makefile11
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c291
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h62
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c195
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c215
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h59
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c104
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c410
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h100
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c445
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h89
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/psm.c119
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile17
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c243
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c1958
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c161
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c861
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c114
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c417
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h555
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c160
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c783
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h439
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/eventmanager.h109
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/eventmgr.h124
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/fiji_pwrvirus.h10299
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h412
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/polaris10_pwrvirus.h10087
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h311
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/Makefile11
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c858
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.h98
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.h52
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c523
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c244
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c413
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c611
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c395
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h62
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c213
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c584
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/Makefile0
-rw-r--r--drivers/gpu/drm/amd/ras/rascore/ras_core_status.h37
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h59
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c661
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h163
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c173
2531 files changed, 4951841 insertions, 113984 deletions
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index e503e3d6d920..216d932a7831 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -1,11 +1,13 @@
+# SPDX-License-Identifier: MIT
menu "ACP (Audio CoProcessor) Configuration"
+ depends on DRM_AMDGPU
config DRM_AMD_ACP
- bool "Enable AMD Audio CoProcessor IP support"
- depends on DRM_AMDGPU
- select MFD_CORE
- select PM_GENERIC_DOMAINS if PM
- help
+ bool "Enable AMD Audio CoProcessor IP support"
+ depends on DRM_AMDGPU
+ select MFD_CORE
+ select PM_GENERIC_DOMAINS if PM
+ help
Choose this option to enable ACP IP support for AMD SOCs.
This adds the ACP (Audio CoProcessor) IP driver and wires
it up into the amdgpu driver. The ACP block provides the DMA
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
index 8a08e81ee90d..d4176a3fb706 100644
--- a/drivers/gpu/drm/amd/acp/Makefile
+++ b/drivers/gpu/drm/amd/acp/Makefile
@@ -1,4 +1,25 @@
#
+# Copyright 2017 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the ACP, which is a sub-component
# of the AMDSOC/AMDGPU drm driver.
# It provides the HW control for ACP related functionalities.
diff --git a/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
index a72ddb2f69ac..b26710cae801 100644
--- a/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
+++ b/drivers/gpu/drm/amd/acp/include/acp_gfx_if.h
@@ -19,13 +19,12 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
-*/
+ */
#ifndef _ACP_GFX_IF_H
#define _ACP_GFX_IF_H
#include <linux/types.h>
-#include "cgs_linux.h"
#include "cgs_common.h"
int amd_acp_hw_init(struct cgs_device *cgs_device,
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 61360e27715f..1acfed2f92ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -1,35 +1,100 @@
+# SPDX-License-Identifier: MIT
+
+config DRM_AMDGPU
+ tristate "AMD GPU"
+ depends on DRM && PCI
+ depends on !UML
+ select FW_LOADER
+ select DRM_CLIENT
+ select DRM_CLIENT_SELECTION
+ select DRM_DISPLAY_DP_HELPER
+ select DRM_DISPLAY_DSC_HELPER
+ select DRM_DISPLAY_HDMI_HELPER
+ select DRM_DISPLAY_HDCP_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_KMS_HELPER
+ select DRM_SCHED
+ select DRM_TTM
+ select DRM_TTM_HELPER
+ select POWER_SUPPLY
+ select HWMON
+ select I2C
+ select I2C_ALGOBIT
+ select CRC16
+ select BACKLIGHT_CLASS_DEVICE
+ select INTERVAL_TREE
+ select DRM_BUDDY
+ select DRM_SUBALLOC_HELPER
+ select DRM_EXEC
+ select DRM_PANEL_BACKLIGHT_QUIRKS
+ # amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
+ # ACPI_VIDEO's dependencies must also be selected.
+ select INPUT if ACPI
+ select ACPI_VIDEO if ACPI
+ # On x86 ACPI_VIDEO also needs ACPI_WMI
+ select X86_PLATFORM_DEVICES if ACPI && X86
+ select ACPI_WMI if ACPI && X86
+ help
+ Choose this option if you have a recent AMD Radeon graphics card.
+
+ If M is selected, the module will be called amdgpu.
+
config DRM_AMDGPU_SI
bool "Enable amdgpu support for SI parts"
depends on DRM_AMDGPU
help
Choose this option if you want to enable experimental support
- for SI asics.
+ for SI (Southern Islands) asics.
+
+ SI is already supported in radeon. Experimental support for SI
+ in amdgpu will be disabled by default and is still provided by
+ radeon. Use module options to override this:
+
+ radeon.si_support=0 amdgpu.si_support=1
config DRM_AMDGPU_CIK
bool "Enable amdgpu support for CIK parts"
depends on DRM_AMDGPU
help
- Choose this option if you want to enable experimental support
- for CIK asics.
+ Choose this option if you want to enable support for CIK (Sea
+ Islands) asics.
+
+ CIK is already supported in radeon. Support for CIK in amdgpu
+ is disabled by default, with CIK still handled by radeon.
+ Use module options to override this:
- CIK is already supported in radeon. CIK support in amdgpu
- is for experimentation and testing.
+ radeon.cik_support=0 amdgpu.cik_support=1
config DRM_AMDGPU_USERPTR
bool "Always enable userptr write support"
depends on DRM_AMDGPU
+ select HMM_MIRROR
select MMU_NOTIFIER
help
- This option selects CONFIG_MMU_NOTIFIER if it isn't already
- selected to enabled full userptr support.
+ This option selects CONFIG_HMM and CONFIG_HMM_MIRROR if it
+ isn't already selected to enable full userptr support.
+
+config DRM_AMD_ISP
+ bool "Enable AMD Image Signal Processor IP support"
+ depends on DRM_AMDGPU && ACPI
+ select MFD_CORE
+ select PM_GENERIC_DOMAINS if PM
+ help
+ Choose this option to enable ISP IP support for AMD SOCs.
+ This adds the ISP (Image Signal Processor) IP driver and wires
+ it up into the amdgpu driver. It is required for camera
+ support on APUs which utilize MIPI cameras.
-config DRM_AMDGPU_GART_DEBUGFS
- bool "Allow GART access through debugfs"
+config DRM_AMDGPU_WERROR
+ bool "Force the compiler to throw an error instead of a warning when compiling"
depends on DRM_AMDGPU
- depends on DEBUG_FS
+ depends on EXPERT
+ depends on !COMPILE_TEST
default n
help
- Selecting this option creates a debugfs file to inspect the mapped
- pages. Uses more memory for housekeeping, enable only for debugging.
+ Add -Werror to the build flags for amdgpu.ko.
+ Only enable this if you are working on fixing warnings in amdgpu.ko.
source "drivers/gpu/drm/amd/acp/Kconfig"
+source "drivers/gpu/drm/amd/display/Kconfig"
+source "drivers/gpu/drm/amd/amdkfd/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 660786aba7d2..64e7acff8f18 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -1,46 +1,113 @@
#
+# Copyright 2017-2024 Advanced Micro Devices, Inc. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
FULL_AMD_PATH=$(src)/..
+DISPLAY_FOLDER_NAME=display
+FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
-ccflags-y := -Iinclude/drm -I$(FULL_AMD_PATH)/include/asic_reg \
+ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/amdgpu \
- -I$(FULL_AMD_PATH)/scheduler \
- -I$(FULL_AMD_PATH)/powerplay/inc \
- -I$(FULL_AMD_PATH)/acp/include
+ -I$(FULL_AMD_PATH)/pm/inc \
+ -I$(FULL_AMD_PATH)/acp/include \
+ -I$(FULL_AMD_DISPLAY_PATH) \
+ -I$(FULL_AMD_DISPLAY_PATH)/include \
+ -I$(FULL_AMD_DISPLAY_PATH)/modules/inc \
+ -I$(FULL_AMD_DISPLAY_PATH)/dc \
+ -I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
+ -I$(FULL_AMD_PATH)/amdkfd
+
+# Locally disable W=1 warnings enabled in drm subsystem Makefile
+subdir-ccflags-y += -Wno-override-init
+subdir-ccflags-$(CONFIG_DRM_AMDGPU_WERROR) += -Werror
amdgpu-y := amdgpu_drv.o
# add KMS driver
-amdgpu-y += amdgpu_device.o amdgpu_kms.o \
+amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
- amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
- amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
- amdgpu_pm.o atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
+ amdgpu_gem.o amdgpu_ring.o \
+ amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o \
+ atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
- amdgpu_prime.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
+ amdgpu_dma_buf.o amdgpu_vm.o amdgpu_vm_pt.o amdgpu_vm_tlb_fence.o \
+ amdgpu_ib.o amdgpu_pll.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
- amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o
+ amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \
+ amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \
+ amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o amdgpu_mmhub.o amdgpu_hdp.o \
+ amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
+ amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
+ amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
+ amdgpu_fw_attestation.o amdgpu_securedisplay.o \
+ amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
+ amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \
+ amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o amdgpu_ip.o
+
+amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
+
+amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
# add asic specific block
-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
- ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
- amdgpu_amdkfd_gfx_v7.o
+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o \
+ dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o
+
+amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o \
+ uvd_v3_1.o
-amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o
+amdgpu-y += \
+ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
+ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
+ nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o soc24.o \
+ sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
+ nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o \
+ cyan_skillfish_reg_init.o
+# add DF block
amdgpu-y += \
- vi.o mxgpu_vi.o nbio_v6_1.o soc15.o mxgpu_ai.o
+ df_v1_7.o \
+ df_v3_6.o \
+ df_v4_3.o \
+ df_v4_6_2.o \
+ df_v4_15.o
# add GMC block
amdgpu-y += \
gmc_v7_0.o \
gmc_v8_0.o \
- gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o
+ gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o mmhub_v9_4.o \
+ gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o \
+ mmhub_v1_7.o gfxhub_v3_0.o mmhub_v3_0.o mmhub_v3_0_2.o gmc_v11_0.o \
+ mmhub_v3_0_1.o gfxhub_v3_0_3.o gfxhub_v1_2.o mmhub_v1_8.o mmhub_v3_3.o \
+ gfxhub_v11_5_0.o mmhub_v4_1_0.o gfxhub_v12_0.o gmc_v12_0.o
+
+# add UMC block
+amdgpu-y += \
+ umc_v6_0.o umc_v6_1.o umc_v6_7.o umc_v8_7.o umc_v8_10.o umc_v12_0.o umc_v8_14.o
# add IH block
amdgpu-y += \
@@ -49,35 +116,68 @@ amdgpu-y += \
iceland_ih.o \
tonga_ih.o \
cz_ih.o \
- vega10_ih.o
+ vega10_ih.o \
+ vega20_ih.o \
+ navi10_ih.o \
+ ih_v6_0.o \
+ ih_v6_1.o \
+ ih_v7_0.o
# add PSP block
amdgpu-y += \
amdgpu_psp.o \
- psp_v3_1.o
-
-# add SMC block
-amdgpu-y += \
- amdgpu_dpm.o \
- amdgpu_powerplay.o
+ psp_v3_1.o \
+ psp_v10_0.o \
+ psp_v11_0.o \
+ psp_v11_0_8.o \
+ psp_v12_0.o \
+ psp_v13_0.o \
+ psp_v13_0_4.o \
+ psp_v14_0.o
# add DCE block
amdgpu-y += \
dce_v10_0.o \
- dce_v11_0.o \
- dce_virtual.o
+ amdgpu_vkms.o
# add GFX block
amdgpu-y += \
amdgpu_gfx.o \
+ amdgpu_rlc.o \
gfx_v8_0.o \
- gfx_v9_0.o
+ gfx_v9_0.o \
+ gfx_v9_4.o \
+ gfx_v9_4_2.o \
+ gfx_v9_4_3.o \
+ gfx_v10_0.o \
+ imu_v11_0.o \
+ gfx_v11_0.o \
+ gfx_v11_0_3.o \
+ imu_v11_0_3.o \
+ gfx_v12_0.o \
+ imu_v12_0.o
# add async DMA block
amdgpu-y += \
+ amdgpu_sdma.o \
sdma_v2_4.o \
sdma_v3_0.o \
- sdma_v4_0.o
+ sdma_v4_0.o \
+ sdma_v4_4.o \
+ sdma_v4_4_2.o \
+ sdma_v5_0.o \
+ sdma_v5_2.o \
+ sdma_v6_0.o \
+ sdma_v7_0.o
+
+# add MES block
+amdgpu-y += \
+ amdgpu_mes.o \
+ mes_v11_0.o \
+ mes_v12_0.o \
+
+# add GFX userqueue support
+amdgpu-y += mes_userqueue.o
# add UVD block
amdgpu-y += \
@@ -92,19 +192,101 @@ amdgpu-y += \
vce_v3_0.o \
vce_v4_0.o
+# add VCN and JPEG block
+amdgpu-y += \
+ amdgpu_vcn.o \
+ vcn_sw_ring.o \
+ vcn_v1_0.o \
+ vcn_v2_0.o \
+ vcn_v2_5.o \
+ vcn_v3_0.o \
+ vcn_v4_0.o \
+ vcn_v4_0_3.o \
+ vcn_v4_0_5.o \
+ vcn_v5_0_0.o \
+ vcn_v5_0_1.o \
+ amdgpu_jpeg.o \
+ jpeg_v1_0.o \
+ jpeg_v2_0.o \
+ jpeg_v2_5.o \
+ jpeg_v3_0.o \
+ jpeg_v4_0.o \
+ jpeg_v4_0_3.o \
+ jpeg_v4_0_5.o \
+ jpeg_v5_0_0.o \
+ jpeg_v5_0_1.o
+
+# add VPE block
+amdgpu-y += \
+ amdgpu_vpe.o \
+ vpe_v6_1.o
+
+# add UMSCH block
+amdgpu-y += \
+ amdgpu_umsch_mm.o \
+ umsch_mm_v4_0.o
+
+#
+# add ATHUB block
+amdgpu-y += \
+ athub_v1_0.o \
+ athub_v2_0.o \
+ athub_v2_1.o \
+ athub_v3_0.o \
+ athub_v4_1_0.o
+
+# add SMUIO block
+amdgpu-y += \
+ smuio_v9_0.o \
+ smuio_v11_0.o \
+ smuio_v11_0_6.o \
+ smuio_v13_0.o \
+ smuio_v13_0_3.o \
+ smuio_v13_0_6.o \
+ smuio_v14_0_2.o
+
+# add reset block
+amdgpu-y += \
+ amdgpu_reset.o
+
+# add MCA block
+amdgpu-y += \
+ mca_v3_0.o
+
# add amdkfd interfaces
+amdgpu-y += amdgpu_amdkfd.o
+
+# add gfx usermode queue
+amdgpu-y += amdgpu_userq.o
+
+ifneq ($(CONFIG_HSA_AMD),)
+AMDKFD_PATH := ../amdkfd
+include $(FULL_AMD_PATH)/amdkfd/Makefile
+amdgpu-y += $(AMDKFD_FILES)
amdgpu-y += \
- amdgpu_amdkfd.o \
- amdgpu_amdkfd_gfx_v8.o
+ amdgpu_amdkfd_fence.o \
+ amdgpu_amdkfd_gpuvm.o \
+ amdgpu_amdkfd_gfx_v8.o \
+ amdgpu_amdkfd_gfx_v9.o \
+ amdgpu_amdkfd_arcturus.o \
+ amdgpu_amdkfd_aldebaran.o \
+ amdgpu_amdkfd_gc_9_4_3.o \
+ amdgpu_amdkfd_gfx_v10.o \
+ amdgpu_amdkfd_gfx_v10_3.o \
+ amdgpu_amdkfd_gfx_v11.o \
+ amdgpu_amdkfd_gfx_v12.o
+
+ifneq ($(CONFIG_DRM_AMDGPU_CIK),)
+amdgpu-y += amdgpu_amdkfd_gfx_v7.o
+endif
+
+endif
# add cgs
amdgpu-y += amdgpu_cgs.o
# GPU scheduler
-amdgpu-y += \
- ../scheduler/gpu_scheduler.o \
- ../scheduler/sched_fence.o \
- amdgpu_job.o
+amdgpu-y += amdgpu_job.o
# ACP component
ifneq ($(CONFIG_DRM_AMD_ACP),)
@@ -119,12 +301,27 @@ endif
amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o
amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
-amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o
+amdgpu-$(CONFIG_HMM_MIRROR) += amdgpu_hmm.o
-include $(FULL_AMD_PATH)/powerplay/Makefile
+include $(FULL_AMD_PATH)/pm/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
-obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
+ifneq ($(CONFIG_DRM_AMD_DC),)
+
+RELATIVE_AMD_DISPLAY_PATH = ../$(DISPLAY_FOLDER_NAME)
+include $(FULL_AMD_DISPLAY_PATH)/Makefile
+
+amdgpu-y += $(AMD_DISPLAY_FILES)
-CFLAGS_amdgpu_trace_points.o := -I$(src)
+endif
+
+# add isp block
+ifneq ($(CONFIG_DRM_AMD_ISP),)
+amdgpu-y += \
+ amdgpu_isp.o \
+ isp_v4_1_0.o \
+ isp_v4_1_1.o
+endif
+
+obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/ObjectID.h b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
index 06192698bd96..a0f0a17e224f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ObjectID.h
+++ b/drivers/gpu/drm/amd/amdgpu/ObjectID.h
@@ -119,6 +119,7 @@
#define CONNECTOR_OBJECT_ID_eDP 0x14
#define CONNECTOR_OBJECT_ID_MXM 0x15
#define CONNECTOR_OBJECT_ID_LVDS_eDP 0x16
+#define CONNECTOR_OBJECT_ID_USBC 0x17
/* deleted */
@@ -136,6 +137,7 @@
#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
#define GENERIC_OBJECT_ID_MXM_OPM 0x03
#define GENERIC_OBJECT_ID_STEREO_PIN 0x04 //This object could show up from Misc Object table, it follows ATOM_OBJECT format, and contains one ATOM_OBJECT_GPIO_CNTL_RECORD for the stereo pin
+#define GENERIC_OBJECT_ID_BRACKET_LAYOUT 0x05
/****************************************************/
/* Graphics Object ENUM ID Definition */
@@ -714,6 +716,13 @@
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
GENERIC_OBJECT_ID_STEREO_PIN << OBJECT_ID_SHIFT)
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_BRACKET_LAYOUT_ENUM_ID2 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_BRACKET_LAYOUT << OBJECT_ID_SHIFT)
/****************************************************/
/* Object Cap definition - Shared with BIOS */
/****************************************************/
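The BRACKET_LAYOUT enum IDs added above follow this header's packing scheme: an object ID, an enum ID and an object type OR'd together at their respective shifts. A minimal decoding sketch, assuming the OBJECT_ID_MASK/ENUM_ID_MASK/OBJECT_TYPE_MASK definitions that normally accompany these shifts in ObjectID.h:

#include <stdint.h>

/* Sketch only: unpack a graphics object ID into its three fields.
 * The *_MASK and *_SHIFT macros are assumed to come from ObjectID.h. */
static inline void decode_graph_object_id(uint16_t obj,
					  uint8_t *object_id,
					  uint8_t *enum_id,
					  uint8_t *object_type)
{
	*object_id   = (obj & OBJECT_ID_MASK)   >> OBJECT_ID_SHIFT;
	*enum_id     = (obj & ENUM_ID_MASK)     >> ENUM_ID_SHIFT;
	*object_type = (obj & OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
}

Connector/encoder parsing in the ATOM BIOS code relies on this kind of decomposition to map object table entries back to connector and encoder types.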
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
new file mode 100644
index 000000000000..9569dc16dd3d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -0,0 +1,469 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "aldebaran.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_job.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_xgmi.h"
+
+static bool aldebaran_is_mode2_default(struct amdgpu_reset_control *reset_ctl)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2) &&
+ adev->gmc.xgmi.connected_to_cpu))
+ return true;
+
+ return false;
+}
+
+static struct amdgpu_reset_handler *
+aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ int i;
+
+ if (reset_context->method == AMD_RESET_METHOD_NONE) {
+ if (aldebaran_is_mode2_default(reset_ctl))
+ reset_context->method = AMD_RESET_METHOD_MODE2;
+ else
+ reset_context->method = amdgpu_asic_reset_method(adev);
+ }
+
+ if (reset_context->method != AMD_RESET_METHOD_NONE) {
+ dev_dbg(adev->dev, "Getting reset handler for method %d\n",
+ reset_context->method);
+ for_each_handler(i, handler, reset_ctl) {
+ if (handler->reset_method == reset_context->method)
+ return handler;
+ }
+ }
+
+ dev_dbg(adev->dev, "Reset handler not found!\n");
+
+ return NULL;
+}
+
+static inline uint32_t aldebaran_get_ip_block_mask(struct amdgpu_device *adev)
+{
+ uint32_t ip_block_mask = BIT(AMD_IP_BLOCK_TYPE_GFX) |
+ BIT(AMD_IP_BLOCK_TYPE_SDMA);
+
+ if (adev->aid_mask)
+ ip_block_mask |= BIT(AMD_IP_BLOCK_TYPE_IH);
+
+ return ip_block_mask;
+}
+
+static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
+{
+ uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
+ uint32_t ip_block;
+ int r, i;
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ ip_block = BIT(adev->ip_blocks[i].version->type);
+ if (!(ip_block_mask & ip_block))
+ continue;
+
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int
+aldebaran_mode2_prepare_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ int r = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ dev_dbg(adev->dev, "Aldebaran prepare hw context\n");
+ /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
+ if (!amdgpu_sriov_vf(adev))
+ r = aldebaran_mode2_suspend_ip(adev);
+
+ return r;
+}
+
+static void aldebaran_async_reset(struct work_struct *work)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_reset_control *reset_ctl =
+ container_of(work, struct amdgpu_reset_control, reset_work);
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ int i;
+
+ for_each_handler(i, handler, reset_ctl) {
+ if (handler->reset_method == reset_ctl->active_reset) {
+ dev_dbg(adev->dev, "Resetting device\n");
+ handler->do_reset(adev);
+ break;
+ }
+ }
+}
+
+static int aldebaran_mode2_reset(struct amdgpu_device *adev)
+{
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+ adev->asic_reset_res = amdgpu_dpm_mode2_reset(adev);
+ return adev->asic_reset_res;
+}
+
+static int
+aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *tmp_adev = NULL;
+ int r = 0;
+
+ dev_dbg(adev->dev, "aldebaran perform hw reset\n");
+
+ if (reset_device_list == NULL)
+ return -EINVAL;
+
+ if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2) &&
+ reset_context->hive == NULL) {
+ /* Wrong context, return error */
+ return -EINVAL;
+ }
+
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ mutex_lock(&tmp_adev->reset_cntl->reset_lock);
+ tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
+ }
+ /*
+ * Mode2 reset doesn't need any sync between nodes in an XGMI hive; instead,
+ * launch the resets together so that they can complete asynchronously on multiple nodes
+ */
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!queue_work(system_unbound_wq,
+ &tmp_adev->reset_cntl->reset_work))
+ r = -EALREADY;
+ } else
+ r = aldebaran_mode2_reset(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "ASIC reset failed with error, %d for drm dev, %s",
+ r, adev_to_drm(tmp_adev)->unique);
+ break;
+ }
+ }
+
+ /* For XGMI wait for all resets to complete before proceed */
+ if (!r) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ flush_work(&tmp_adev->reset_cntl->reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+ }
+
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
+ tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
+ }
+
+ return r;
+}
+
+static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
+{
+ struct amdgpu_firmware_info *ucode_list[AMDGPU_UCODE_ID_MAXIMUM];
+ uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
+ struct amdgpu_firmware_info *ucode;
+ struct amdgpu_ip_block *cmn_block;
+ struct amdgpu_ip_block *ih_block;
+ int ucode_count = 0;
+ int i, r;
+
+ dev_dbg(adev->dev, "Reloading ucodes after reset\n");
+ for (i = 0; i < adev->firmware.max_ucodes; i++) {
+ ucode = &adev->firmware.ucode[i];
+ if (!ucode->fw)
+ continue;
+ switch (ucode->ucode_id) {
+ case AMDGPU_UCODE_ID_SDMA0:
+ case AMDGPU_UCODE_ID_SDMA1:
+ case AMDGPU_UCODE_ID_SDMA2:
+ case AMDGPU_UCODE_ID_SDMA3:
+ case AMDGPU_UCODE_ID_SDMA4:
+ case AMDGPU_UCODE_ID_SDMA5:
+ case AMDGPU_UCODE_ID_SDMA6:
+ case AMDGPU_UCODE_ID_SDMA7:
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ case AMDGPU_UCODE_ID_CP_MEC1_JT:
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+ case AMDGPU_UCODE_ID_RLC_G:
+ ucode_list[ucode_count++] = ucode;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Reinit NBIF block */
+ cmn_block =
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_COMMON);
+ if (unlikely(!cmn_block)) {
+ dev_err(adev->dev, "Failed to get BIF handle\n");
+ return -EINVAL;
+ }
+ r = amdgpu_ip_block_resume(cmn_block);
+ if (r)
+ return r;
+
+ if (ip_block_mask & BIT(AMD_IP_BLOCK_TYPE_IH)) {
+ ih_block = amdgpu_device_ip_get_ip_block(adev,
+ AMD_IP_BLOCK_TYPE_IH);
+ if (unlikely(!ih_block)) {
+ dev_err(adev->dev, "Failed to get IH handle\n");
+ return -EINVAL;
+ }
+ r = amdgpu_ip_block_resume(ih_block);
+ if (r)
+ return r;
+ }
+
+ /* Reinit GFXHUB */
+ adev->gfxhub.funcs->init(adev);
+ r = adev->gfxhub.funcs->gart_enable(adev);
+ if (r) {
+ dev_err(adev->dev, "GFXHUB gart reenable failed after reset\n");
+ return r;
+ }
+
+ /* Reload GFX firmware */
+ r = psp_load_fw_list(&adev->psp, ucode_list, ucode_count);
+ if (r) {
+ dev_err(adev->dev, "GFX ucode load failed after reset\n");
+ return r;
+ }
+
+ /* Resume RLC, FW needs RLC alive to complete reset process */
+ adev->gfx.rlc.funcs->resume(adev);
+
+ /* Wait for FW reset event complete */
+ r = amdgpu_dpm_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0);
+ if (r) {
+ dev_err(adev->dev,
+ "Failed to get response from firmware after reset\n");
+ return r;
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_COMMON))
+ continue;
+
+ if (adev->ip_blocks[i].version->funcs->late_init) {
+ r = adev->ip_blocks[i].version->funcs->late_init(
+ &adev->ip_blocks[i]);
+ if (r) {
+ dev_err(adev->dev,
+ "late_init of IP block <%s> failed %d after reset\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
+ return r;
+ }
+ }
+ adev->ip_blocks[i].status.late_initialized = true;
+ }
+
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+
+ return r;
+}
+
+static int
+aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *tmp_adev = NULL;
+ struct amdgpu_ras *con;
+ int r;
+
+ if (reset_device_list == NULL)
+ return -EINVAL;
+
+ if (amdgpu_ip_version(reset_context->reset_req_dev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 2) &&
+ reset_context->hive == NULL) {
+ /* Wrong context, return error */
+ return -EINVAL;
+ }
+
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ amdgpu_set_init_level(tmp_adev,
+ AMDGPU_INIT_LEVEL_RESET_RECOVERY);
+ dev_info(tmp_adev->dev,
+ "GPU reset succeeded, trying to resume\n");
+ /*TBD: Ideally should clear only GFX, SDMA blocks*/
+ amdgpu_ras_clear_err_state(tmp_adev);
+ r = aldebaran_mode2_restore_ip(tmp_adev);
+ if (r)
+ goto end;
+
+ /*
+ * Add this ASIC as tracked, as the reset has already
+ * completed successfully.
+ */
+ amdgpu_register_gpu_instance(tmp_adev);
+
+ /* Resume RAS, ecc_irq */
+ con = amdgpu_ras_get_context(tmp_adev);
+ if (!amdgpu_sriov_vf(tmp_adev) && con) {
+ if (tmp_adev->sdma.ras &&
+ tmp_adev->sdma.ras->ras_block.ras_late_init) {
+ r = tmp_adev->sdma.ras->ras_block.ras_late_init(tmp_adev,
+ &tmp_adev->sdma.ras->ras_block.ras_comm);
+ if (r) {
+ dev_err(tmp_adev->dev, "SDMA failed to execute ras_late_init! ret:%d\n", r);
+ goto end;
+ }
+ }
+
+ if (tmp_adev->gfx.ras &&
+ tmp_adev->gfx.ras->ras_block.ras_late_init) {
+ r = tmp_adev->gfx.ras->ras_block.ras_late_init(tmp_adev,
+ &tmp_adev->gfx.ras->ras_block.ras_comm);
+ if (r) {
+ dev_err(tmp_adev->dev, "GFX failed to execute ras_late_init! ret:%d\n", r);
+ goto end;
+ }
+ }
+ }
+
+ amdgpu_ras_resume(tmp_adev);
+
+ /* Update PSP FW topology after reset */
+ if (reset_context->hive &&
+ tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(reset_context->hive,
+ tmp_adev);
+
+ if (!r) {
+ amdgpu_set_init_level(tmp_adev,
+ AMDGPU_INIT_LEVEL_DEFAULT);
+ amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+
+ r = amdgpu_ib_ring_tests(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "ib ring test failed (%d).\n", r);
+ r = -EAGAIN;
+ tmp_adev->asic_reset_res = r;
+ goto end;
+ }
+ }
+ }
+
+end:
+ return r;
+}
+
+static struct amdgpu_reset_handler aldebaran_mode2_handler = {
+ .reset_method = AMD_RESET_METHOD_MODE2,
+ .prepare_env = NULL,
+ .prepare_hwcontext = aldebaran_mode2_prepare_hwcontext,
+ .perform_reset = aldebaran_mode2_perform_reset,
+ .restore_hwcontext = aldebaran_mode2_restore_hwcontext,
+ .restore_env = NULL,
+ .do_reset = aldebaran_mode2_reset,
+};
+
+static struct amdgpu_reset_handler
+ *aldebaran_rst_handlers[AMDGPU_RESET_MAX_HANDLERS] = {
+ &aldebaran_mode2_handler,
+ &xgmi_reset_on_init_handler,
+ };
+
+int aldebaran_reset_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_reset_control *reset_ctl;
+
+ reset_ctl = kzalloc(sizeof(*reset_ctl), GFP_KERNEL);
+ if (!reset_ctl)
+ return -ENOMEM;
+
+ reset_ctl->handle = adev;
+ reset_ctl->async_reset = aldebaran_async_reset;
+ reset_ctl->active_reset = AMD_RESET_METHOD_NONE;
+ reset_ctl->get_reset_handler = aldebaran_get_reset_handler;
+
+ INIT_WORK(&reset_ctl->reset_work, reset_ctl->async_reset);
+ /* Only mode2 is handled through reset control now */
+ reset_ctl->reset_handlers = &aldebaran_rst_handlers;
+
+ adev->reset_cntl = reset_ctl;
+
+ return 0;
+}
+
+int aldebaran_reset_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->reset_cntl);
+ adev->reset_cntl = NULL;
+ return 0;
+}
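The new aldebaran.c only builds and registers the handler set; selecting this reset control happens once at device init. A hedged sketch of the per-ASIC dispatch (the real selection lives in amdgpu_reset.c and keys off the MP1 IP version; the switch below is illustrative, not a verbatim copy):

/* Sketch: pick the ASIC-specific reset control at init time. */
int amdgpu_reset_init(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 2):	/* Aldebaran */
		ret = aldebaran_reset_init(adev);
		break;
	default:
		break;	/* other ASICs keep the generic reset paths */
	}

	return ret;
}

Once adev->reset_cntl is populated, the generic recovery code can call reset_ctl->get_reset_handler() and then run the handler's prepare_hwcontext, perform_reset and restore_hwcontext callbacks in order.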
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/amdgpu/aldebaran.h
index 8fe8ba9434ff..a07db5454d49 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,6 +21,12 @@
*
*/
-bool acpi_atcs_functions_supported(void *device, uint32_t index);
-int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise);
-bool acpi_atcs_notify_pcie_device_ready(void *device);
+#ifndef __ALDEBARAN_H__
+#define __ALDEBARAN_H__
+
+#include "amdgpu.h"
+
+int aldebaran_reset_init(struct amdgpu_device *adev);
+int aldebaran_reset_fini(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aldebaran_reg_init.c
new file mode 100644
index 000000000000..28e6c9ab8767
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran_reg_init.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "aldebaran_ip_offset.h"
+
+int aldebaran_reg_base_init(struct amdgpu_device *adev)
+{
+ /* HW has more IP blocks, only initialize the blocks needed by our driver */
+ uint32_t i;
+ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
+ adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(SDMA2_BASE.instance[i]));
+ adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(SDMA3_BASE.instance[i]));
+ adev->reg_offset[SDMA4_HWIP][i] = (uint32_t *)(&(SDMA4_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i]));
+ adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
+ }
+ return 0;
+}
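The reg_offset tables filled in by aldebaran_reg_base_init() are what the SOC15 register accessors index into. A minimal sketch of that lookup, assuming the conventional SOC15_REG_OFFSET-style macro from soc15_common.h (quoted from memory, so treat the exact form as an assumption):

/* Sketch: SOC15-style register offset lookup built on adev->reg_offset.
 * ip##_HWIP selects the table populated above, 'inst' picks the instance,
 * and reg##_BASE_IDX / 'reg' come from the generated register headers. */
#define SOC15_REG_OFFSET(ip, inst, reg) \
	(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)

/* e.g. RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)) reads a GC register
 * on instance 0 without hard-coding Aldebaran's register apertures. */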
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 833c3c16501a..2a0df4cabb99 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -28,6 +28,20 @@
#ifndef __AMDGPU_H__
#define __AMDGPU_H__
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "amdgpu: " fmt
+
+#ifdef dev_fmt
+#undef dev_fmt
+#endif
+
+#define dev_fmt(fmt) "amdgpu: " fmt
+
+#include "amdgpu_ctx.h"
+
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
@@ -35,18 +49,21 @@
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
+#include <linux/pci.h>
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_module.h>
-#include <ttm/ttm_execbuf_util.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
-#include <drm/drmP.h>
-#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+
+#include <kgd_kfd_interface.h>
+#include "dm_pp_interface.h"
+#include "kgd_pp_interface.h"
#include "amd_shared.h"
+#include "amdgpu_utils.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
@@ -57,71 +74,216 @@
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
-#include "amd_powerplay.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
-
-#include "gpu_scheduler.h"
+#include "amdgpu_vcn.h"
+#include "amdgpu_jpeg.h"
+#include "amdgpu_vpe.h"
+#include "amdgpu_umsch_mm.h"
+#include "amdgpu_gmc.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_sdma.h"
+#include "amdgpu_lsdma.h"
+#include "amdgpu_nbio.h"
+#include "amdgpu_hdp.h"
+#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
+#include "amdgpu_csa.h"
+#include "amdgpu_mes_ctx.h"
+#include "amdgpu_gart.h"
+#include "amdgpu_debugfs.h"
+#include "amdgpu_job.h"
+#include "amdgpu_bo_list.h"
+#include "amdgpu_gem.h"
+#include "amdgpu_doorbell.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_discovery.h"
+#include "amdgpu_mes.h"
+#include "amdgpu_umc.h"
+#include "amdgpu_mmhub.h"
+#include "amdgpu_gfxhub.h"
+#include "amdgpu_df.h"
+#include "amdgpu_smuio.h"
+#include "amdgpu_fdinfo.h"
+#include "amdgpu_mca.h"
+#include "amdgpu_aca.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_cper.h"
+#include "amdgpu_xcp.h"
+#include "amdgpu_seq64.h"
+#include "amdgpu_reg_state.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_eviction_fence.h"
+#if defined(CONFIG_DRM_AMD_ISP)
+#include "amdgpu_isp.h"
+#endif
+
+#define MAX_GPU_INSTANCE 64
+
+#define GFX_SLICE_PERIOD_MS 250
+
+struct amdgpu_gpu_instance {
+ struct amdgpu_device *adev;
+ int mgpu_fan_enabled;
+};
+
+struct amdgpu_mgpu_info {
+ struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE];
+ struct mutex mutex;
+ uint32_t num_gpu;
+ uint32_t num_dgpu;
+ uint32_t num_apu;
+};
+
+enum amdgpu_ss {
+ AMDGPU_SS_DRV_LOAD,
+ AMDGPU_SS_DEV_D0,
+ AMDGPU_SS_DEV_D3,
+ AMDGPU_SS_DRV_UNLOAD
+};
+
+struct amdgpu_hwip_reg_entry {
+ u32 hwip;
+ u32 inst;
+ u32 seg;
+ u32 reg_offset;
+ const char *reg_name;
+};
+
+struct amdgpu_watchdog_timer {
+ bool timeout_fatal_disable;
+ uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */
+};
+
+#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256
/*
* Modules parameters.
*/
extern int amdgpu_modeset;
-extern int amdgpu_vram_limit;
+extern unsigned int amdgpu_vram_limit;
+extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
+extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
-extern int amdgpu_benchmarking;
-extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
-extern int amdgpu_lockup_timeout;
+extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
-extern unsigned amdgpu_ip_block_mask;
+extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
+extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
+extern int amdgpu_vm_update_mode;
+extern int amdgpu_exp_hw_support;
+extern int amdgpu_dc;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
-extern int amdgpu_no_evict;
-extern int amdgpu_direct_gma_size;
-extern unsigned amdgpu_pcie_gen_cap;
-extern unsigned amdgpu_pcie_lane_cap;
-extern unsigned amdgpu_cg_mask;
-extern unsigned amdgpu_pg_mask;
+extern uint amdgpu_pcie_gen_cap;
+extern uint amdgpu_pcie_lane_cap;
+extern u64 amdgpu_cg_mask;
+extern uint amdgpu_pg_mask;
+extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
-extern unsigned amdgpu_pp_feature_mask;
-extern int amdgpu_vram_page_split;
-extern int amdgpu_ngg;
-extern int amdgpu_prim_buf_per_se;
-extern int amdgpu_pos_buf_per_se;
-extern int amdgpu_cntl_sb_buf_per_se;
-extern int amdgpu_param_buf_per_se;
-
-#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
+extern uint amdgpu_pp_feature_mask;
+extern uint amdgpu_force_long_training;
+extern int amdgpu_lbpw;
+extern int amdgpu_compute_multipipe;
+extern int amdgpu_gpu_recovery;
+extern int amdgpu_emu_mode;
+extern uint amdgpu_smu_memory_pool_size;
+extern int amdgpu_smu_pptable_id;
+extern uint amdgpu_dc_feature_mask;
+extern uint amdgpu_freesync_vid_mode;
+extern uint amdgpu_dc_debug_mask;
+extern uint amdgpu_dc_visual_confirm;
+extern int amdgpu_dm_abm_level;
+extern int amdgpu_backlight;
+extern int amdgpu_damage_clips;
+extern struct amdgpu_mgpu_info mgpu_info;
+extern int amdgpu_ras_enable;
+extern uint amdgpu_ras_mask;
+extern int amdgpu_bad_page_threshold;
+extern bool amdgpu_ignore_bad_page_threshold;
+extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
+extern int amdgpu_async_gfx_ring;
+extern int amdgpu_mcbp;
+extern int amdgpu_discovery;
+extern int amdgpu_mes;
+extern int amdgpu_mes_log_enable;
+extern int amdgpu_mes_kiq;
+extern int amdgpu_uni_mes;
+extern int amdgpu_noretry;
+extern int amdgpu_force_asic_type;
+extern int amdgpu_smartshift_bias;
+extern int amdgpu_use_xgmi_p2p;
+extern int amdgpu_mtype_local;
+extern int amdgpu_enforce_isolation;
+#ifdef CONFIG_HSA_AMD
+extern int sched_policy;
+extern bool debug_evictions;
+extern bool no_system_mem_limit;
+extern int halt_if_hws_hang;
+extern uint amdgpu_svm_default_granularity;
+#else
+static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS;
+static const bool __maybe_unused debug_evictions; /* = false */
+static const bool __maybe_unused no_system_mem_limit;
+static const int __maybe_unused halt_if_hws_hang;
+#endif
+#ifdef CONFIG_HSA_AMD_P2P
+extern bool pcie_p2p;
+#endif
+
+extern int amdgpu_tmz;
+extern int amdgpu_reset_method;
+
+#ifdef CONFIG_DRM_AMDGPU_SI
+extern int amdgpu_si_support;
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+extern int amdgpu_cik_support;
+#endif
+extern int amdgpu_num_kcq;
+
+#define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
+#define AMDGPU_UMSCHFW_LOG_SIZE (32 * 1024)
+extern int amdgpu_vcnfw_log;
+extern int amdgpu_sg_display;
+extern int amdgpu_umsch_mm;
+extern int amdgpu_seamless;
+extern int amdgpu_umsch_mm_fwlog;
+
+extern int amdgpu_user_partt_mode;
+extern int amdgpu_agp;
+extern int amdgpu_rebar;
+
+extern int amdgpu_wbrf;
+extern int amdgpu_user_queue;
+
+#define AMDGPU_VM_MAX_NUM_CTX 4096
+#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
-/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
-#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 16
-/* max number of IP instances */
-#define AMDGPU_MAX_SDMA_INSTANCES 2
+#define AMDGPU_VBIOS_VGA_ALLOCATION (9 * 1024 * 1024) /* reserve 8MB for vga emulator and 1 MB for FB */
/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b
@@ -143,26 +305,36 @@ extern int amdgpu_param_buf_per_se;
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)
-/* GFX current status */
-#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
-#define AMDGPU_GFX_SAFE_MODE 0x00000001L
-#define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L
-#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
-#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
+/* reset mask */
+#define AMDGPU_RESET_TYPE_FULL (1 << 0) /* full adapter reset, mode1/mode2/BACO/etc. */
+#define AMDGPU_RESET_TYPE_SOFT_RESET (1 << 1) /* IP level soft reset */
+#define AMDGPU_RESET_TYPE_PER_QUEUE (1 << 2) /* per queue */
+#define AMDGPU_RESET_TYPE_PER_PIPE (1 << 3) /* per pipe */
/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128
+/* smart shift bias level limits */
+#define AMDGPU_SMARTSHIFT_MAX_BIAS (100)
+#define AMDGPU_SMARTSHIFT_MIN_BIAS (-100)
+
+/* Extra time delay (in ms) to eliminate the influence of momentary temperature fluctuations */
+#define AMDGPU_SWCTF_EXTRA_DELAY 50
+
+struct amdgpu_xcp_mgr;
struct amdgpu_device;
-struct amdgpu_ib;
-struct amdgpu_cs_parser;
-struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
+struct amdgpu_bo_va_mapping;
+struct kfd_vm_fault_info;
+struct amdgpu_hive_info;
+struct amdgpu_reset_context;
+struct amdgpu_reset_control;
enum amdgpu_cp_irq {
- AMDGPU_CP_IRQ_GFX_EOP = 0,
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP = 0,
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE1_EOP,
AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
@@ -175,13 +347,6 @@ enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_LAST
};
-enum amdgpu_sdma_irq {
- AMDGPU_SDMA_IRQ_TRAP0 = 0,
- AMDGPU_SDMA_IRQ_TRAP1,
-
- AMDGPU_SDMA_IRQ_LAST
-};
-
enum amdgpu_thermal_irq {
AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,
@@ -193,18 +358,25 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};
-
-int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_clockgating_state state);
-int amdgpu_set_powergating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_powergating_state state);
-void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags);
-int amdgpu_wait_for_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type);
-bool amdgpu_is_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type);
+#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
+#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
+#define MAX_KIQ_REG_TRY 1000
+
+int amdgpu_device_ip_set_clockgating_state(void *dev,
+ enum amd_ip_block_type block_type,
+ enum amd_clockgating_state state);
+int amdgpu_device_ip_set_powergating_state(void *dev,
+ enum amd_ip_block_type block_type,
+ enum amd_powergating_state state);
+void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags);
+int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
+bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type);
+int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block);
+
+int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block);
#define AMDGPU_MAX_IP_NUM 16
@@ -227,115 +399,28 @@ struct amdgpu_ip_block_version {
struct amdgpu_ip_block {
struct amdgpu_ip_block_status status;
const struct amdgpu_ip_block_version *version;
+ struct amdgpu_device *adev;
};
-int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
- enum amd_ip_block_type type,
- u32 major, u32 minor);
-
-struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
- enum amd_ip_block_type type);
-
-int amdgpu_ip_block_add(struct amdgpu_device *adev,
- const struct amdgpu_ip_block_version *ip_block_version);
-
-/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
-struct amdgpu_buffer_funcs {
- /* maximum bytes in a single operation */
- uint32_t copy_max_bytes;
-
- /* number of dw to reserve per operation */
- unsigned copy_num_dw;
-
- /* used for buffer migration */
- void (*emit_copy_buffer)(struct amdgpu_ib *ib,
- /* src addr in bytes */
- uint64_t src_offset,
- /* dst addr in bytes */
- uint64_t dst_offset,
- /* number of byte to transfer */
- uint32_t byte_count);
-
- /* maximum bytes in a single operation */
- uint32_t fill_max_bytes;
-
- /* number of dw to reserve per operation */
- unsigned fill_num_dw;
-
- /* used for buffer clearing */
- void (*emit_fill_buffer)(struct amdgpu_ib *ib,
- /* value to write to memory */
- uint32_t src_data,
- /* dst addr in bytes */
- uint64_t dst_offset,
- /* number of byte to fill */
- uint32_t byte_count);
-};
+int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
+ enum amd_ip_block_type type,
+ u32 major, u32 minor);
-/* provided by hw blocks that can write ptes, e.g., sdma */
-struct amdgpu_vm_pte_funcs {
- /* copy pte entries from GART */
- void (*copy_pte)(struct amdgpu_ib *ib,
- uint64_t pe, uint64_t src,
- unsigned count);
- /* write pte one entry at a time with addr mapping */
- void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
- uint64_t value, unsigned count,
- uint32_t incr);
- /* for linear pte/pde updates without addr mapping */
- void (*set_pte_pde)(struct amdgpu_ib *ib,
- uint64_t pe,
- uint64_t addr, unsigned count,
- uint32_t incr, uint64_t flags);
-};
+struct amdgpu_ip_block *
+amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type);
-/* provided by the gmc block */
-struct amdgpu_gart_funcs {
- /* flush the vm tlb via mmio */
- void (*flush_gpu_tlb)(struct amdgpu_device *adev,
- uint32_t vmid);
- /* write pte/pde updates using the cpu */
- int (*set_pte_pde)(struct amdgpu_device *adev,
- void *cpu_pt_addr, /* cpu addr of page table */
- uint32_t gpu_page_idx, /* pte/pde to update */
- uint64_t addr, /* addr to write into pte/pde */
- uint64_t flags); /* access flags */
- /* enable/disable PRT support */
- void (*set_prt)(struct amdgpu_device *adev, bool enable);
- /* set pte flags based per asic */
- uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
- uint32_t flags);
- /* adjust mc addr in fb for APU case */
- u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr);
- uint32_t (*get_invalidate_req)(unsigned int vm_id);
-};
-
-/* provided by the ih block */
-struct amdgpu_ih_funcs {
- /* ring read/write ptr handling, called from interrupt context */
- u32 (*get_wptr)(struct amdgpu_device *adev);
- void (*decode_iv)(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry);
- void (*set_rptr)(struct amdgpu_device *adev);
-};
+int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version);
/*
* BIOS.
*/
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);
-
-/*
- * Dummy page
- */
-struct amdgpu_dummy_page {
- struct page *page;
- dma_addr_t addr;
-};
-int amdgpu_dummy_page_init(struct amdgpu_device *adev);
-void amdgpu_dummy_page_fini(struct amdgpu_device *adev);
-
-
+bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
+ u8 *bios, u32 length_bytes);
+void amdgpu_bios_release(struct amdgpu_device *adev);
/*
* Clocks
*/
@@ -350,107 +435,10 @@ struct amdgpu_clock {
uint32_t default_mclk;
uint32_t default_sclk;
uint32_t default_dispclk;
- uint32_t current_dispclk;
uint32_t dp_extclk;
uint32_t max_pixel_clock;
};
-/*
- * BO.
- */
-struct amdgpu_bo_list_entry {
- struct amdgpu_bo *robj;
- struct ttm_validate_buffer tv;
- struct amdgpu_bo_va *bo_va;
- uint32_t priority;
- struct page **user_pages;
- int user_invalidated;
-};
-
-struct amdgpu_bo_va_mapping {
- struct list_head list;
- struct rb_node rb;
- uint64_t start;
- uint64_t last;
- uint64_t __subtree_last;
- uint64_t offset;
- uint64_t flags;
-};
-
-/* bo virtual addresses in a specific vm */
-struct amdgpu_bo_va {
- /* protected by bo being reserved */
- struct list_head bo_list;
- struct dma_fence *last_pt_update;
- unsigned ref_count;
-
- /* protected by vm mutex and spinlock */
- struct list_head vm_status;
-
- /* mappings for this bo_va */
- struct list_head invalids;
- struct list_head valids;
-
- /* constant after initialization */
- struct amdgpu_vm *vm;
- struct amdgpu_bo *bo;
-};
-
-#define AMDGPU_GEM_DOMAIN_MAX 0x3
-
-struct amdgpu_bo {
- /* Protected by tbo.reserved */
- u32 prefered_domains;
- u32 allowed_domains;
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
- struct ttm_placement placement;
- struct ttm_buffer_object tbo;
- struct ttm_bo_kmap_obj kmap;
- u64 flags;
- unsigned pin_count;
- void *kptr;
- u64 tiling_flags;
- u64 metadata_flags;
- void *metadata;
- u32 metadata_size;
- unsigned prime_shared_count;
- /* list of all virtual address to which this bo
- * is associated to
- */
- struct list_head va;
- /* Constant after initialization */
- struct drm_gem_object gem_base;
- struct amdgpu_bo *parent;
- struct amdgpu_bo *shadow;
-
- struct ttm_bo_kmap_obj dma_buf_vmap;
- struct amdgpu_mn *mn;
- struct list_head mn_list;
- struct list_head shadow_list;
-};
-#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
-
-void amdgpu_gem_object_free(struct drm_gem_object *obj);
-int amdgpu_gem_object_open(struct drm_gem_object *obj,
- struct drm_file *file_priv);
-void amdgpu_gem_object_close(struct drm_gem_object *obj,
- struct drm_file *file_priv);
-unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
-struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *
-amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg);
-struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gobj,
- int flags);
-int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
-void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
-struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
-void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
-void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
-
/* sub-allocation manager; it has to be protected by another lock.
 * By conception this is a helper for other parts of the driver
 * like the indirect buffer or semaphore, which both have their
@@ -475,257 +463,14 @@ int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
* alignment).
*/
-#define AMDGPU_SA_NUM_FENCE_LISTS 32
-
struct amdgpu_sa_manager {
- wait_queue_head_t wq;
- struct amdgpu_bo *bo;
- struct list_head *hole;
- struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
- struct list_head olist;
- unsigned size;
- uint64_t gpu_addr;
- void *cpu_ptr;
- uint32_t domain;
- uint32_t align;
-};
-
-/* sub-allocation buffer */
-struct amdgpu_sa_bo {
- struct list_head olist;
- struct list_head flist;
- struct amdgpu_sa_manager *manager;
- unsigned soffset;
- unsigned eoffset;
- struct dma_fence *fence;
-};
-
-/*
- * GEM objects.
- */
-void amdgpu_gem_force_release(struct amdgpu_device *adev);
-int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
- int alignment, u32 initial_domain,
- u64 flags, bool kernel,
- struct drm_gem_object **obj);
-
-int amdgpu_mode_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-int amdgpu_mode_dumb_mmap(struct drm_file *filp,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p);
-int amdgpu_fence_slab_init(void);
-void amdgpu_fence_slab_fini(void);
-
-/*
- * GART structures, functions & helpers
- */
-struct amdgpu_mc;
-
-#define AMDGPU_GPU_PAGE_SIZE 4096
-#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
-#define AMDGPU_GPU_PAGE_SHIFT 12
-#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
-
-struct amdgpu_gart {
- dma_addr_t table_addr;
- struct amdgpu_bo *robj;
- void *ptr;
- unsigned num_gpu_pages;
- unsigned num_cpu_pages;
- unsigned table_size;
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- struct page **pages;
-#endif
- bool ready;
-
- /* Asic default pte flags */
- uint64_t gart_pte_flags;
-
- const struct amdgpu_gart_funcs *gart_funcs;
-};
-
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
-int amdgpu_gart_init(struct amdgpu_device *adev);
-void amdgpu_gart_fini(struct amdgpu_device *adev);
-void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
- int pages);
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
- int pages, struct page **pagelist,
- dma_addr_t *dma_addr, uint64_t flags);
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
-
-/*
- * VMHUB structures, functions & helpers
- */
-struct amdgpu_vmhub {
- uint32_t ctx0_ptb_addr_lo32;
- uint32_t ctx0_ptb_addr_hi32;
- uint32_t vm_inv_eng0_req;
- uint32_t vm_inv_eng0_ack;
- uint32_t vm_context0_cntl;
- uint32_t vm_l2_pro_fault_status;
- uint32_t vm_l2_pro_fault_cntl;
-};
-
-/*
- * GPU MC structures, functions & helpers
- */
-struct amdgpu_mc {
- resource_size_t aper_size;
- resource_size_t aper_base;
- resource_size_t agp_base;
- /* for some chips with <= 32MB we need to lie
- * about vram size near mc fb location */
- u64 mc_vram_size;
- u64 visible_vram_size;
- u64 gtt_size;
- u64 gtt_start;
- u64 gtt_end;
- u64 vram_start;
- u64 vram_end;
- unsigned vram_width;
- u64 real_vram_size;
- int vram_mtrr;
- u64 gtt_base_align;
- u64 mc_mask;
- const struct firmware *fw; /* MC firmware */
- uint32_t fw_version;
- struct amdgpu_irq_src vm_fault;
- uint32_t vram_type;
- uint32_t srbm_soft_reset;
- struct amdgpu_mode_mc_save save;
- bool prt_warning;
- /* apertures */
- u64 shared_aperture_start;
- u64 shared_aperture_end;
- u64 private_aperture_start;
- u64 private_aperture_end;
- /* protects concurrent invalidation */
- spinlock_t invalidate_lock;
-};
-
-/*
- * GPU doorbell structures, functions & helpers
- */
-typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
-{
- AMDGPU_DOORBELL_KIQ = 0x000,
- AMDGPU_DOORBELL_HIQ = 0x001,
- AMDGPU_DOORBELL_DIQ = 0x002,
- AMDGPU_DOORBELL_MEC_RING0 = 0x010,
- AMDGPU_DOORBELL_MEC_RING1 = 0x011,
- AMDGPU_DOORBELL_MEC_RING2 = 0x012,
- AMDGPU_DOORBELL_MEC_RING3 = 0x013,
- AMDGPU_DOORBELL_MEC_RING4 = 0x014,
- AMDGPU_DOORBELL_MEC_RING5 = 0x015,
- AMDGPU_DOORBELL_MEC_RING6 = 0x016,
- AMDGPU_DOORBELL_MEC_RING7 = 0x017,
- AMDGPU_DOORBELL_GFX_RING0 = 0x020,
- AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
- AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
- AMDGPU_DOORBELL_IH = 0x1E8,
- AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
- AMDGPU_DOORBELL_INVALID = 0xFFFF
-} AMDGPU_DOORBELL_ASSIGNMENT;
-
-struct amdgpu_doorbell {
- /* doorbell mmio */
- resource_size_t base;
- resource_size_t size;
- u32 __iomem *ptr;
- u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
+ struct drm_suballoc_manager base;
+ struct amdgpu_bo *bo;
+ uint64_t gpu_addr;
+ void *cpu_ptr;
};
/*
- * 64bit doorbell, offset are in QWORD, occupy 2KB doorbell space
- */
-typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
-{
- /*
- * All compute related doorbells: kiq, hiq, diq, traditional compute queue, user queue, should locate in
- * a continues range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range.
- * Compute related doorbells are allocated from 0x00 to 0x8a
- */
-
-
- /* kernel scheduling */
- AMDGPU_DOORBELL64_KIQ = 0x00,
-
- /* HSA interface queue and debug queue */
- AMDGPU_DOORBELL64_HIQ = 0x01,
- AMDGPU_DOORBELL64_DIQ = 0x02,
-
- /* Compute engines */
- AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
- AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
- AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
- AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
- AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
- AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
- AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
- AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,
-
- /* User queue doorbell range (128 doorbells) */
- AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
- AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,
-
- /* Graphics engine */
- AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
-
- /*
- * Other graphics doorbells can be allocated here: from 0x8c to 0xef
- * Graphics voltage island aperture 1
- * default non-graphics QWORD index is 0xF0 - 0xFF inclusive
- */
-
- /* sDMA engines */
- AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
- AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2,
- AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
-
- /* Interrupt handler */
- AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
- AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */
- AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */
-
- /* VCN engine use 32 bits doorbell */
- AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VNC0 and upper 32 bits for VNC1 */
- AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
- AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
- AMDGPU_DOORBELL64_VCN6_7 = 0xFB,
-
- /* overlap the doorbell assignment with VCN as they are mutually exclusive
- * VCE engine's doorbell is 32 bit and two VCE ring share one QWORD
- */
- AMDGPU_DOORBELL64_RING0_1 = 0xF8,
- AMDGPU_DOORBELL64_RING2_3 = 0xF9,
- AMDGPU_DOORBELL64_RING4_5 = 0xFA,
- AMDGPU_DOORBELL64_RING6_7 = 0xFB,
-
- AMDGPU_DOORBELL64_UVD_RING0_1 = 0xFC,
- AMDGPU_DOORBELL64_UVD_RING2_3 = 0xFD,
- AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFE,
- AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFF,
-
- AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
- AMDGPU_DOORBELL64_INVALID = 0xFFFF
-} AMDGPU_DOORBELL64_ASSIGNMENT;
-
-
-void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset);
-
-/*
* IRQS.
*/
@@ -738,80 +483,12 @@ struct amdgpu_flip_work {
uint64_t base;
struct drm_pending_vblank_event *event;
struct amdgpu_bo *old_abo;
- struct dma_fence *excl;
unsigned shared_count;
struct dma_fence **shared;
struct dma_fence_cb cb;
bool async;
};
-
-/*
- * CP & rings.
- */
-
-struct amdgpu_ib {
- struct amdgpu_sa_bo *sa_bo;
- uint32_t length_dw;
- uint64_t gpu_addr;
- uint32_t *ptr;
- uint32_t flags;
-};
-
-extern const struct amd_sched_backend_ops amdgpu_sched_ops;
-
-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
- struct amdgpu_job **job, struct amdgpu_vm *vm);
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- struct amdgpu_job **job);
-
-void amdgpu_job_free_resources(struct amdgpu_job *job);
-void amdgpu_job_free(struct amdgpu_job *job);
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct amd_sched_entity *entity, void *owner,
- struct dma_fence **f);
-
-/*
- * context related structures
- */
-
-struct amdgpu_ctx_ring {
- uint64_t sequence;
- struct dma_fence **fences;
- struct amd_sched_entity entity;
-};
-
-struct amdgpu_ctx {
- struct kref refcount;
- struct amdgpu_device *adev;
- unsigned reset_counter;
- spinlock_t ring_lock;
- struct dma_fence **fences;
- struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
- bool preamble_presented;
-};
-
-struct amdgpu_ctx_mgr {
- struct amdgpu_device *adev;
- struct mutex lock;
- /* protected by lock */
- struct idr ctx_handles;
-};
-
-struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
-int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
-
-uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct dma_fence *fence);
-struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
- struct amdgpu_ring *ring, uint64_t seq);
-
-int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp);
-
-void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
-void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
-
/*
* file private structure
*/
@@ -819,487 +496,160 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
struct amdgpu_fpriv {
struct amdgpu_vm vm;
struct amdgpu_bo_va *prt_va;
+ struct amdgpu_bo_va *csa_va;
+ struct amdgpu_bo_va *seq64_va;
struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
-};
+ struct amdgpu_userq_mgr userq_mgr;
-/*
- * residency list
- */
+ /* Eviction fence infra */
+ struct amdgpu_eviction_fence_mgr evf_mgr;
-struct amdgpu_bo_list {
- struct mutex lock;
- struct amdgpu_bo *gds_obj;
- struct amdgpu_bo *gws_obj;
- struct amdgpu_bo *oa_obj;
- unsigned first_userptr;
- unsigned num_entries;
- struct amdgpu_bo_list_entry *array;
+ /** GPU partition selection */
+ uint32_t xcp_id;
};
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
- struct list_head *validated);
-void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
-
-/*
- * GFX stuff
- */
-#include "clearstate_defs.h"
-
-struct amdgpu_rlc_funcs {
- void (*enter_safe_mode)(struct amdgpu_device *adev);
- void (*exit_safe_mode)(struct amdgpu_device *adev);
-};
-
-struct amdgpu_rlc {
- /* for power gating */
- struct amdgpu_bo *save_restore_obj;
- uint64_t save_restore_gpu_addr;
- volatile uint32_t *sr_ptr;
- const u32 *reg_list;
- u32 reg_list_size;
- /* for clear state */
- struct amdgpu_bo *clear_state_obj;
- uint64_t clear_state_gpu_addr;
- volatile uint32_t *cs_ptr;
- const struct cs_section_def *cs_data;
- u32 clear_state_size;
- /* for cp tables */
- struct amdgpu_bo *cp_table_obj;
- uint64_t cp_table_gpu_addr;
- volatile uint32_t *cp_table_ptr;
- u32 cp_table_size;
-
- /* safe mode for updating CG/PG state */
- bool in_safe_mode;
- const struct amdgpu_rlc_funcs *funcs;
-
- /* for firmware data */
- u32 save_and_restore_offset;
- u32 clear_state_descriptor_offset;
- u32 avail_scratch_ram_locations;
- u32 reg_restore_list_size;
- u32 reg_list_format_start;
- u32 reg_list_format_separate_start;
- u32 starting_offsets_start;
- u32 reg_list_format_size_bytes;
- u32 reg_list_size_bytes;
-
- u32 *register_list_format;
- u32 *register_restore;
-};
-
-struct amdgpu_mec {
- struct amdgpu_bo *hpd_eop_obj;
- u64 hpd_eop_gpu_addr;
- struct amdgpu_bo *mec_fw_obj;
- u64 mec_fw_gpu_addr;
- u32 num_pipe;
- u32 num_mec;
- u32 num_queue;
- void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1];
-};
-
-struct amdgpu_kiq {
- u64 eop_gpu_addr;
- struct amdgpu_bo *eop_obj;
- struct amdgpu_ring ring;
- struct amdgpu_irq_src irq;
-};
-
-/*
- * GPU scratch registers structures, functions & helpers
- */
-struct amdgpu_scratch {
- unsigned num_reg;
- uint32_t reg_base;
- uint32_t free_mask;
-};
-
-/*
- * GFX configurations
- */
-#define AMDGPU_GFX_MAX_SE 4
-#define AMDGPU_GFX_MAX_SH_PER_SE 2
-
-struct amdgpu_rb_config {
- uint32_t rb_backend_disable;
- uint32_t user_rb_backend_disable;
- uint32_t raster_config;
- uint32_t raster_config_1;
-};
-
-struct gb_addr_config {
- uint16_t pipe_interleave_size;
- uint8_t num_pipes;
- uint8_t max_compress_frags;
- uint8_t num_banks;
- uint8_t num_se;
- uint8_t num_rb_per_se;
-};
-
-struct amdgpu_gfx_config {
- unsigned max_shader_engines;
- unsigned max_tile_pipes;
- unsigned max_cu_per_sh;
- unsigned max_sh_per_se;
- unsigned max_backends_per_se;
- unsigned max_texture_channel_caches;
- unsigned max_gprs;
- unsigned max_gs_threads;
- unsigned max_hw_contexts;
- unsigned sc_prim_fifo_size_frontend;
- unsigned sc_prim_fifo_size_backend;
- unsigned sc_hiz_tile_fifo_size;
- unsigned sc_earlyz_tile_fifo_size;
-
- unsigned num_tile_pipes;
- unsigned backend_enable_mask;
- unsigned mem_max_burst_length_bytes;
- unsigned mem_row_size_in_kb;
- unsigned shader_engine_tile_size;
- unsigned num_gpus;
- unsigned multi_gpu_tile_size;
- unsigned mc_arb_ramcfg;
- unsigned gb_addr_config;
- unsigned num_rbs;
- unsigned gs_vgt_table_depth;
- unsigned gs_prim_buffer_depth;
-
- uint32_t tile_mode_array[32];
- uint32_t macrotile_mode_array[16];
-
- struct gb_addr_config gb_addr_config_fields;
- struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
-
- /* gfx configure feature */
- uint32_t double_offchip_lds_buf;
-};
-
-struct amdgpu_cu_info {
- uint32_t number; /* total active CU number */
- uint32_t ao_cu_mask;
- uint32_t wave_front_size;
- uint32_t bitmap[4][4];
-};
-
-struct amdgpu_gfx_funcs {
- /* get the gpu clock counter */
- uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
- void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
- void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
- void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst);
- void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst);
-};
-
-struct amdgpu_ngg_buf {
- struct amdgpu_bo *bo;
- uint64_t gpu_addr;
- uint32_t size;
- uint32_t bo_size;
-};
-
-enum {
- NGG_PRIM = 0,
- NGG_POS,
- NGG_CNTL,
- NGG_PARAM,
- NGG_BUF_MAX
-};
-
-struct amdgpu_ngg {
- struct amdgpu_ngg_buf buf[NGG_BUF_MAX];
- uint32_t gds_reserve_addr;
- uint32_t gds_reserve_size;
- bool init;
-};
-
-struct amdgpu_gfx {
- struct mutex gpu_clock_mutex;
- struct amdgpu_gfx_config config;
- struct amdgpu_rlc rlc;
- struct amdgpu_mec mec;
- struct amdgpu_kiq kiq;
- struct amdgpu_scratch scratch;
- const struct firmware *me_fw; /* ME firmware */
- uint32_t me_fw_version;
- const struct firmware *pfp_fw; /* PFP firmware */
- uint32_t pfp_fw_version;
- const struct firmware *ce_fw; /* CE firmware */
- uint32_t ce_fw_version;
- const struct firmware *rlc_fw; /* RLC firmware */
- uint32_t rlc_fw_version;
- const struct firmware *mec_fw; /* MEC firmware */
- uint32_t mec_fw_version;
- const struct firmware *mec2_fw; /* MEC2 firmware */
- uint32_t mec2_fw_version;
- uint32_t me_feature_version;
- uint32_t ce_feature_version;
- uint32_t pfp_feature_version;
- uint32_t rlc_feature_version;
- uint32_t mec_feature_version;
- uint32_t mec2_feature_version;
- struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
- unsigned num_gfx_rings;
- struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
- unsigned num_compute_rings;
- struct amdgpu_irq_src eop_irq;
- struct amdgpu_irq_src priv_reg_irq;
- struct amdgpu_irq_src priv_inst_irq;
- /* gfx status */
- uint32_t gfx_current_status;
- /* ce ram size*/
- unsigned ce_ram_size;
- struct amdgpu_cu_info cu_info;
- const struct amdgpu_gfx_funcs *funcs;
-
- /* reset mask */
- uint32_t grbm_soft_reset;
- uint32_t srbm_soft_reset;
- bool in_reset;
- /* NGG */
- struct amdgpu_ngg ngg;
-};
-
-int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- unsigned size, struct amdgpu_ib *ib);
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct dma_fence *f);
-int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
- struct amdgpu_ib *ibs, struct amdgpu_job *job,
- struct dma_fence **f);
-int amdgpu_ib_pool_init(struct amdgpu_device *adev);
-void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
-int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
-
-/*
- * CS.
- */
-struct amdgpu_cs_chunk {
- uint32_t chunk_id;
- uint32_t length_dw;
- void *kdata;
-};
-
-struct amdgpu_cs_parser {
- struct amdgpu_device *adev;
- struct drm_file *filp;
- struct amdgpu_ctx *ctx;
-
- /* chunks */
- unsigned nchunks;
- struct amdgpu_cs_chunk *chunks;
-
- /* scheduler job object */
- struct amdgpu_job *job;
-
- /* buffer objects */
- struct ww_acquire_ctx ticket;
- struct amdgpu_bo_list *bo_list;
- struct amdgpu_bo_list_entry vm_pd;
- struct list_head validated;
- struct dma_fence *fence;
- uint64_t bytes_moved_threshold;
- uint64_t bytes_moved;
- struct amdgpu_bo_list_entry *evictable;
-
- /* user fence */
- struct amdgpu_bo_list_entry uf_entry;
-};
-
-#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0) /* bit set means command submit involves a preamble IB */
-#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) /* bit set means preamble IB is first presented in belonging context */
-#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occured */
-#define AMDGPU_VM_DOMAIN (1 << 3) /* bit set means in virtual memory context */
-
-struct amdgpu_job {
- struct amd_sched_job base;
- struct amdgpu_device *adev;
- struct amdgpu_vm *vm;
- struct amdgpu_ring *ring;
- struct amdgpu_sync sync;
- struct amdgpu_ib *ibs;
- struct dma_fence *fence; /* the hw fence */
- uint32_t preamble_status;
- uint32_t num_ibs;
- void *owner;
- uint64_t fence_ctx; /* the fence_context this job uses */
- bool vm_needs_flush;
- bool need_pipeline_sync;
- unsigned vm_id;
- uint64_t vm_pd_addr;
- uint32_t gds_base, gds_size;
- uint32_t gws_base, gws_size;
- uint32_t oa_base, oa_size;
-
- /* user fence handling */
- uint64_t uf_addr;
- uint64_t uf_sequence;
-
-};
-#define to_amdgpu_job(sched_job) \
- container_of((sched_job), struct amdgpu_job, base)
-
-static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
- uint32_t ib_idx, int idx)
-{
- return p->job->ibs[ib_idx].ptr[idx];
-}
-
-static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
- uint32_t ib_idx, int idx,
- uint32_t value)
-{
- p->job->ibs[ib_idx].ptr[idx] = value;
-}
+int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
/*
* Writeback
*/
#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */
+/**
+ * amdgpu_wb - This struct is used for small GPU memory allocation.
+ *
+ * This struct is used to allocate a small amount of GPU memory that can be
+ * used to shadow certain states in memory. This is especially useful for
+ * providing easy CPU access to some states without requiring register access
+ * (e.g., if some block is power gated, reading a register may be problematic).
+ *
+ * Note: the term writeback was initially used because many of the amdgpu
+ * components had some level of writeback memory, and this struct initially
+ * described those components.
+ */
struct amdgpu_wb {
+
+ /**
+ * @wb_obj:
+ *
+ * Buffer Object used for the writeback memory.
+ */
struct amdgpu_bo *wb_obj;
- volatile uint32_t *wb;
- uint64_t gpu_addr;
- u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
- unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
-};
-int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
-void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
-int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb);
-void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb);
+ /**
+ * @wb:
+ *
+	 * Pointer to the first writeback slot. On the CPU side, a slot can be
+	 * accessed directly by using its offset as an index into this array.
+	 * For the GPU address, gpu_addr plus the offset must be used.
+ */
+ uint32_t *wb;
-void amdgpu_get_pcie_info(struct amdgpu_device *adev);
+ /**
+ * @gpu_addr:
+ *
+ * Writeback base address in the GPU.
+ */
+ uint64_t gpu_addr;
-/*
- * SDMA
- */
-struct amdgpu_sdma_instance {
- /* SDMA firmware */
- const struct firmware *fw;
- uint32_t fw_version;
- uint32_t feature_version;
-
- struct amdgpu_ring ring;
- bool burst_nop;
-};
+ /**
+ * @num_wb:
+ *
+ * Number of writeback slots reserved for amdgpu.
+ */
+ u32 num_wb;
-struct amdgpu_sdma {
- struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
-#ifdef CONFIG_DRM_AMDGPU_SI
- //SI DMA has a difference trap irq number for the second engine
- struct amdgpu_irq_src trap_irq_1;
-#endif
- struct amdgpu_irq_src trap_irq;
- struct amdgpu_irq_src illegal_inst_irq;
- int num_instances;
- uint32_t srbm_soft_reset;
-};
+ /**
+ * @used:
+ *
+	 * Bitmap tracking the writeback slots that are already in use.
+ */
+ unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
-/*
- * Firmware
- */
-enum amdgpu_firmware_load_type {
- AMDGPU_FW_LOAD_DIRECT = 0,
- AMDGPU_FW_LOAD_SMU,
- AMDGPU_FW_LOAD_PSP,
+ /**
+ * @lock:
+ *
+ * Protects read and write of the used field array.
+ */
+ spinlock_t lock;
};
-struct amdgpu_firmware {
- struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
- enum amdgpu_firmware_load_type load_type;
- struct amdgpu_bo *fw_buf;
- unsigned int fw_size;
- unsigned int max_ucodes;
- /* firmwares are loaded by psp instead of smu from vega10 */
- const struct amdgpu_psp_funcs *funcs;
- struct amdgpu_bo *rbuf;
- struct mutex mutex;
-};
+int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
+void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
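/*
 * Example (illustrative sketch, not part of this header): allocating a
 * writeback slot, letting an engine write to it and reading it back from the
 * CPU. This assumes the usual amdgpu convention that the value returned by
 * amdgpu_device_wb_get() is a dword offset: it indexes adev->wb.wb for CPU
 * access and, multiplied by 4, gives the offset from adev->wb.gpu_addr.
 *
 *	u32 slot;
 *	u64 wb_gpu_addr;
 *	int r;
 *
 *	r = amdgpu_device_wb_get(adev, &slot);
 *	if (r)
 *		return r;
 *	wb_gpu_addr = adev->wb.gpu_addr + slot * 4;
 *	// ... point the engine's status/fence write at wb_gpu_addr ...
 *	pr_debug("wb value: 0x%08x\n", adev->wb.wb[slot]);
 *	amdgpu_device_wb_free(adev, slot);
 */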
/*
* Benchmarking
*/
-void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
-
-
-/*
- * Testing
- */
-void amdgpu_test_moves(struct amdgpu_device *adev);
-
-/*
- * MMU Notifier
- */
-#if defined(CONFIG_MMU_NOTIFIER)
-int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
-void amdgpu_mn_unregister(struct amdgpu_bo *bo);
-#else
-static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
-{
- return -ENODEV;
-}
-static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
-#endif
-
-/*
- * Debugfs
- */
-struct amdgpu_debugfs {
- const struct drm_info_list *files;
- unsigned num_files;
-};
-
-int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- const struct drm_info_list *files,
- unsigned nfiles);
-int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
-
-#if defined(CONFIG_DEBUG_FS)
-int amdgpu_debugfs_init(struct drm_minor *minor);
-#endif
-
-int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
-
-/*
- * amdgpu smumgr functions
- */
-struct amdgpu_smumgr_funcs {
- int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
- int (*request_smu_load_fw)(struct amdgpu_device *adev);
- int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
-};
-
-/*
- * amdgpu smumgr
- */
-struct amdgpu_smumgr {
- struct amdgpu_bo *toc_buf;
- struct amdgpu_bo *smu_buf;
- /* asic priv smu data */
- void *priv;
- spinlock_t smu_lock;
- /* smumgr functions */
- const struct amdgpu_smumgr_funcs *smumgr_funcs;
- /* ucode loading complete flag */
- uint32_t fw_flags;
-};
+int amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
/*
* ASIC specific register table accessible by UMD
*/
struct amdgpu_allowed_register_entry {
uint32_t reg_offset;
- bool untouched;
bool grbm_indexed;
};
+/**
+ * enum amd_reset_method - Methods for resetting AMD GPU devices
+ *
+ * @AMD_RESET_METHOD_NONE: The device will not be reset.
+ * @AMD_RESET_METHOD_LEGACY: Method reserved for SI, CIK and VI ASICs.
+ * @AMD_RESET_METHOD_MODE0: Reset the entire ASIC. Not currently available for
+ *                          any device.
+ * @AMD_RESET_METHOD_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX, VCN,
+ *                          etc.) individually. Suitable only for some discrete
+ *                          GPUs; not available on all ASICs.
+ * @AMD_RESET_METHOD_MODE2: Resets a lesser level of IPs compared to MODE1.
+ *                          Which IPs are reset depends on the ASIC. Notably
+ *                          doesn't reset IPs shared with the CPU on APUs or
+ *                          the memory controllers (so VRAM is not lost). Not
+ *                          available on all ASICs.
+ * @AMD_RESET_METHOD_LINK: Triggers SW-UP link reset on other GPUs.
+ * @AMD_RESET_METHOD_BACO: BACO (Bus Alive, Chip Off) powers the card off and
+ *                         on without powering off the PCI bus. Suitable only
+ *                         for discrete GPUs.
+ * @AMD_RESET_METHOD_PCI: Does a full bus reset using the core Linux PCI reset
+ *                        infrastructure and performs a secondary bus reset or
+ *                        FLR, depending on what the underlying hardware
+ *                        supports.
+ *
+ * Methods available to the AMD GPU driver for resetting the device. Not all
+ * methods are suitable for every device. Users can override the method using
+ * the module parameter `reset_method`.
+ */
+enum amd_reset_method {
+ AMD_RESET_METHOD_NONE = -1,
+ AMD_RESET_METHOD_LEGACY = 0,
+ AMD_RESET_METHOD_MODE0,
+ AMD_RESET_METHOD_MODE1,
+ AMD_RESET_METHOD_MODE2,
+ AMD_RESET_METHOD_LINK,
+ AMD_RESET_METHOD_BACO,
+ AMD_RESET_METHOD_PCI,
+ AMD_RESET_METHOD_ON_INIT,
+};
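/*
 * Example (illustrative sketch, not part of this header): an ASIC backend's
 * ->reset_method() callback typically honours the reset_method module
 * parameter (the amdgpu_reset_method global) when it names a method the chip
 * supports, and otherwise picks a default. soc_supports_baco() is a
 * hypothetical helper.
 *
 *	static enum amd_reset_method
 *	soc_asic_reset_method(struct amdgpu_device *adev)
 *	{
 *		if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
 *		    amdgpu_reset_method == AMD_RESET_METHOD_BACO)
 *			return amdgpu_reset_method;
 *		return soc_supports_baco(adev) ? AMD_RESET_METHOD_BACO :
 *						 AMD_RESET_METHOD_MODE1;
 *	}
 */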
+
+struct amdgpu_video_codec_info {
+ u32 codec_type;
+ u32 max_width;
+ u32 max_height;
+ u32 max_pixels_per_frame;
+ u32 max_level;
+};
+
+#define codec_info_build(type, width, height, level) \
+ .codec_type = type,\
+ .max_width = width,\
+ .max_height = height,\
+ .max_pixels_per_frame = height * width,\
+ .max_level = level,
+
+struct amdgpu_video_codecs {
+ const u32 codec_count;
+ const struct amdgpu_video_codec_info *codec_array;
+};
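/*
 * Example (illustrative sketch, not part of this header): per-ASIC codec
 * capability tables are usually built with codec_info_build() and exposed
 * through amdgpu_video_codecs; the codec type constants come from the amdgpu
 * UAPI. The table names and limits below are hypothetical.
 *
 *	static const struct amdgpu_video_codec_info soc_video_codecs_decode_array[] = {
 *		{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
 *				  4096, 4096, 52)},
 *		{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
 *				  8192, 4352, 186)},
 *	};
 *
 *	static const struct amdgpu_video_codecs soc_video_codecs_decode = {
 *		.codec_count = ARRAY_SIZE(soc_video_codecs_decode_array),
 *		.codec_array = soc_video_codecs_decode_array,
 *	};
 */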
+
/*
* ASIC specific functions.
*/
@@ -1311,6 +661,7 @@ struct amdgpu_asic_funcs {
u32 sh_num, u32 reg_offset, u32 *value);
void (*set_vga_state)(struct amdgpu_device *adev, bool state);
int (*reset)(struct amdgpu_device *adev);
+ enum amd_reset_method (*reset_method)(struct amdgpu_device *adev);
/* get the reference clock */
u32 (*get_xclk)(struct amdgpu_device *adev);
/* MM block clocks */
@@ -1321,95 +672,60 @@ struct amdgpu_asic_funcs {
void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
/* get config memsize register */
u32 (*get_config_memsize)(struct amdgpu_device *adev);
+ /* flush hdp write queue */
+ void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+ /* invalidate hdp read cache */
+ void (*invalidate_hdp)(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+	/* check if the asic needs a full reset or if soft reset will work */
+ bool (*need_full_reset)(struct amdgpu_device *adev);
+	/* initialize doorbell layout for a specific asic */
+ void (*init_doorbell_index)(struct amdgpu_device *adev);
+ /* PCIe bandwidth usage */
+ void (*get_pcie_usage)(struct amdgpu_device *adev, uint64_t *count0,
+ uint64_t *count1);
+ /* do we need to reset the asic at init time (e.g., kexec) */
+ bool (*need_reset_on_init)(struct amdgpu_device *adev);
+ /* PCIe replay counter */
+ uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev);
+ /* device supports BACO */
+ int (*supports_baco)(struct amdgpu_device *adev);
+ /* pre asic_init quirks */
+ void (*pre_asic_init)(struct amdgpu_device *adev);
+ /* enter/exit umd stable pstate */
+ int (*update_umd_stable_pstate)(struct amdgpu_device *adev, bool enter);
+ /* query video codecs */
+ int (*query_video_codecs)(struct amdgpu_device *adev, bool encode,
+ const struct amdgpu_video_codecs **codecs);
+ /* encode "> 32bits" smn addressing */
+ u64 (*encode_ext_smn_addressing)(int ext_id);
+
+ ssize_t (*get_reg_state)(struct amdgpu_device *adev,
+ enum amdgpu_reg_state reg_state, void *buf,
+ size_t max_size);
};
/*
* IOCTL.
*/
-int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp);
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp);
-int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp);
-int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp);
-int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp);
-int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp);
-int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
- struct drm_file *filp);
-
/* VRAM scratch page for HDP bug, default vram page */
-struct amdgpu_vram_scratch {
+struct amdgpu_mem_scratch {
struct amdgpu_bo *robj;
- volatile uint32_t *ptr;
+ uint32_t *ptr;
u64 gpu_addr;
};
/*
- * ACPI
- */
-struct amdgpu_atif_notification_cfg {
- bool enabled;
- int command_code;
-};
-
-struct amdgpu_atif_notifications {
- bool display_switch;
- bool expansion_mode_change;
- bool thermal_state;
- bool forced_power_state;
- bool system_power_state;
- bool display_conf_change;
- bool px_gfx_switch;
- bool brightness_change;
- bool dgpu_display_event;
-};
-
-struct amdgpu_atif_functions {
- bool system_params;
- bool sbios_requests;
- bool select_active_disp;
- bool lid_state;
- bool get_tv_standard;
- bool set_tv_standard;
- bool get_panel_expansion_mode;
- bool set_panel_expansion_mode;
- bool temperature_change;
- bool graphics_device_types;
-};
-
-struct amdgpu_atif {
- struct amdgpu_atif_notifications notifications;
- struct amdgpu_atif_functions functions;
- struct amdgpu_atif_notification_cfg notification_cfg;
- struct amdgpu_encoder *encoder_for_bl;
-};
-
-struct amdgpu_atcs_functions {
- bool get_ext_state;
- bool pcie_perf_req;
- bool pcie_dev_rdy;
- bool pcie_bus_width;
-};
-
-struct amdgpu_atcs {
- struct amdgpu_atcs_functions functions;
-};
-
-/*
* CGS
*/
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
@@ -1421,50 +737,254 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
+typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device*, uint64_t);
+typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device*, uint64_t, uint32_t);
+
+typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t);
+typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t);
+
+typedef uint64_t (*amdgpu_rreg64_ext_t)(struct amdgpu_device*, uint64_t);
+typedef void (*amdgpu_wreg64_ext_t)(struct amdgpu_device*, uint64_t, uint64_t);
+
typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
+struct amdgpu_mmio_remap {
+ u32 reg_offset;
+ resource_size_t bus_addr;
+ struct amdgpu_bo *bo;
+};
+
+/* Define the HW IP blocks that will be used in the driver; add more if necessary */
+enum amd_hw_ip_block_type {
+ GC_HWIP = 1,
+ HDP_HWIP,
+ SDMA0_HWIP,
+ SDMA1_HWIP,
+ SDMA2_HWIP,
+ SDMA3_HWIP,
+ SDMA4_HWIP,
+ SDMA5_HWIP,
+ SDMA6_HWIP,
+ SDMA7_HWIP,
+ LSDMA_HWIP,
+ MMHUB_HWIP,
+ ATHUB_HWIP,
+ NBIO_HWIP,
+ MP0_HWIP,
+ MP1_HWIP,
+ UVD_HWIP,
+ VCN_HWIP = UVD_HWIP,
+ JPEG_HWIP = VCN_HWIP,
+ VCN1_HWIP,
+ VCE_HWIP,
+ VPE_HWIP,
+ DF_HWIP,
+ DCE_HWIP,
+ OSSSYS_HWIP,
+ SMUIO_HWIP,
+ PWR_HWIP,
+ NBIF_HWIP,
+ THM_HWIP,
+ CLK_HWIP,
+ UMC_HWIP,
+ RSMU_HWIP,
+ XGMI_HWIP,
+ DCI_HWIP,
+ PCIE_HWIP,
+ ISP_HWIP,
+ MAX_HWIP
+};
+
+#define HWIP_MAX_INSTANCE 44
+
+#define HW_ID_MAX 300
+#define IP_VERSION_FULL(mj, mn, rv, var, srev) \
+ (((mj) << 24) | ((mn) << 16) | ((rv) << 8) | ((var) << 4) | (srev))
+#define IP_VERSION(mj, mn, rv) IP_VERSION_FULL(mj, mn, rv, 0, 0)
+#define IP_VERSION_MAJ(ver) ((ver) >> 24)
+#define IP_VERSION_MIN(ver) (((ver) >> 16) & 0xFF)
+#define IP_VERSION_REV(ver) (((ver) >> 8) & 0xFF)
+#define IP_VERSION_VARIANT(ver) (((ver) >> 4) & 0xF)
+#define IP_VERSION_SUBREV(ver) ((ver) & 0xF)
+#define IP_VERSION_MAJ_MIN_REV(ver) ((ver) >> 8)
+
+struct amdgpu_ip_map_info {
+ /* Map of logical to actual dev instances/mask */
+ uint32_t dev_inst[MAX_HWIP][HWIP_MAX_INSTANCE];
+ int8_t (*logical_to_dev_inst)(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ int8_t inst);
+ uint32_t (*logical_to_dev_mask)(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ uint32_t mask);
+};
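/*
 * Example (illustrative sketch, not part of this header): callers translate a
 * logical IP instance to the device instance before touching per-instance
 * registers, falling back to the identity mapping when no translation hook is
 * installed.
 *
 *	int dev_inst = adev->ip_map.logical_to_dev_inst ?
 *		       adev->ip_map.logical_to_dev_inst(adev, SDMA0_HWIP, 0) : 0;
 */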
+
+enum amdgpu_uid_type {
+ AMDGPU_UID_TYPE_XCD,
+ AMDGPU_UID_TYPE_AID,
+ AMDGPU_UID_TYPE_SOC,
+ AMDGPU_UID_TYPE_MAX
+};
+
+#define AMDGPU_UID_INST_MAX 8 /* max number of instances for each UID type */
+
+struct amdgpu_uid {
+ uint64_t uid[AMDGPU_UID_TYPE_MAX][AMDGPU_UID_INST_MAX];
+ struct amdgpu_device *adev;
+};
+
+struct amd_powerplay {
+ void *pp_handle;
+ const struct amd_pm_funcs *pp_funcs;
+};
+
+struct ip_discovery_top;
+
+/* polaris10 kickers */
+#define ASICID_IS_P20(did, rid) (((did == 0x67DF) && \
+ ((rid == 0xE3) || \
+ (rid == 0xE4) || \
+ (rid == 0xE5) || \
+ (rid == 0xE7) || \
+ (rid == 0xEF))) || \
+ ((did == 0x6FDF) && \
+ ((rid == 0xE7) || \
+ (rid == 0xEF) || \
+ (rid == 0xFF))))
+
+#define ASICID_IS_P30(did, rid) ((did == 0x67DF) && \
+ ((rid == 0xE1) || \
+ (rid == 0xF7)))
+
+/* polaris11 kickers */
+#define ASICID_IS_P21(did, rid) (((did == 0x67EF) && \
+ ((rid == 0xE0) || \
+ (rid == 0xE5))) || \
+ ((did == 0x67FF) && \
+ ((rid == 0xCF) || \
+ (rid == 0xEF) || \
+ (rid == 0xFF))))
+
+#define ASICID_IS_P31(did, rid) ((did == 0x67EF) && \
+ ((rid == 0xE2)))
+
+/* polaris12 kickers */
+#define ASICID_IS_P23(did, rid) (((did == 0x6987) && \
+ ((rid == 0xC0) || \
+ (rid == 0xC1) || \
+ (rid == 0xC3) || \
+ (rid == 0xC7))) || \
+ ((did == 0x6981) && \
+ ((rid == 0x00) || \
+ (rid == 0x01) || \
+ (rid == 0x10))))
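/*
 * Example (illustrative sketch, not part of this header): the kicker checks
 * key off the PCI device and revision IDs, e.g. to select a different firmware
 * binary. The firmware prefix below is hypothetical.
 *
 *	if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision))
 *		chip_name = "polaris10_k";
 */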
+
+struct amdgpu_mqd_prop {
+ uint64_t mqd_gpu_addr;
+ uint64_t hqd_base_gpu_addr;
+ uint64_t rptr_gpu_addr;
+ uint64_t wptr_gpu_addr;
+ uint32_t queue_size;
+ bool use_doorbell;
+ uint32_t doorbell_index;
+ uint64_t eop_gpu_addr;
+ uint32_t hqd_pipe_priority;
+ uint32_t hqd_queue_priority;
+ bool allow_tunneling;
+ bool hqd_active;
+ uint64_t shadow_addr;
+ uint64_t gds_bkup_addr;
+ uint64_t csa_addr;
+ uint64_t fence_address;
+ bool tmz_queue;
+ bool kernel_queue;
+};
+
+struct amdgpu_mqd {
+ unsigned mqd_size;
+ int (*init_mqd)(struct amdgpu_device *adev, void *mqd,
+ struct amdgpu_mqd_prop *p);
+};
+
+struct amdgpu_pcie_reset_ctx {
+ bool in_link_reset;
+ bool occurs_dpc;
+ bool audio_suspended;
+ struct pci_dev *swus;
+ struct pci_saved_state *swus_pcistate;
+ struct pci_saved_state *swds_pcistate;
+};
+
+/*
+ * Custom init levels could be defined for different situations where a full
+ * initialization of all hardware blocks is not expected. Sample cases are
+ * custom init sequences after resume from S0i3/S3, reset on initialization,
+ * partial reset of blocks, etc. Presently, this defines only a few levels;
+ * levels are described in the corresponding struct definitions -
+ * amdgpu_init_default, amdgpu_init_minimal_xgmi.
+ */
+enum amdgpu_init_lvl_id {
+ AMDGPU_INIT_LEVEL_DEFAULT,
+ AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
+ AMDGPU_INIT_LEVEL_RESET_RECOVERY,
+};
+
+struct amdgpu_init_level {
+ enum amdgpu_init_lvl_id level;
+ uint32_t hwini_ip_block_mask;
+};
+
+#define AMDGPU_RESET_MAGIC_NUM 64
+#define AMDGPU_MAX_DF_PERFMONS 4
+struct amdgpu_reset_domain;
+struct amdgpu_fru_info;
+
+enum amdgpu_enforce_isolation_mode {
+ AMDGPU_ENFORCE_ISOLATION_DISABLE = 0,
+ AMDGPU_ENFORCE_ISOLATION_ENABLE = 1,
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY = 2,
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3,
+};
+
struct amdgpu_device {
struct device *dev;
- struct drm_device *ddev;
struct pci_dev *pdev;
+ struct drm_device ddev;
#ifdef CONFIG_DRM_AMD_ACP
struct amdgpu_acp acp;
#endif
-
+ struct amdgpu_hive_info *hive;
+ struct amdgpu_xcp_mgr *xcp_mgr;
/* ASIC */
enum amd_asic_type asic_type;
uint32_t family;
uint32_t rev_id;
uint32_t external_rev_id;
unsigned long flags;
+ unsigned long apu_flags;
int usec_timeout;
const struct amdgpu_asic_funcs *asic_funcs;
bool shutdown;
- bool need_dma32;
+ bool need_swiotlb;
bool accel_working;
- struct work_struct reset_work;
struct notifier_block acpi_nb;
+ struct notifier_block pm_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
- struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
- unsigned debugfs_count;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
-#endif
- struct amdgpu_atif atif;
- struct amdgpu_atcs atcs;
+ struct debugfs_blob_wrapper debugfs_vbios_blob;
+ struct debugfs_blob_wrapper debugfs_discovery_blob;
struct mutex srbm_mutex;
/* GRBM index mutex. Protects concurrent access to GRBM index */
struct mutex grbm_idx_mutex;
struct dev_pm_domain vga_pm_domain;
bool have_disp_power_ref;
+ bool have_atomics_support;
/* BIOS */
bool is_atom_fw;
uint8_t *bios;
uint32_t bios_size;
- struct amdgpu_bo *stollen_vga_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -1474,6 +994,7 @@ struct amdgpu_device {
void __iomem *rmmio;
/* protects concurrent MM_INDEX/DATA based register access */
spinlock_t mmio_idx_lock;
+ struct amdgpu_mmio_remap rmmio_remap;
/* protects concurrent SMC based register access */
spinlock_t smc_idx_lock;
amdgpu_rreg_t smc_rreg;
@@ -1484,6 +1005,12 @@ struct amdgpu_device {
amdgpu_wreg_t pcie_wreg;
amdgpu_rreg_t pciep_rreg;
amdgpu_wreg_t pciep_wreg;
+ amdgpu_rreg_ext_t pcie_rreg_ext;
+ amdgpu_wreg_ext_t pcie_wreg_ext;
+ amdgpu_rreg64_t pcie_rreg64;
+ amdgpu_wreg64_t pcie_wreg64;
+ amdgpu_rreg64_ext_t pcie_rreg64_ext;
+ amdgpu_wreg64_ext_t pcie_wreg64_ext;
/* protects concurrent UVD register access */
spinlock_t uvd_ctx_idx_lock;
amdgpu_rreg_t uvd_ctx_rreg;
@@ -1496,73 +1023,92 @@ struct amdgpu_device {
spinlock_t gc_cac_idx_lock;
amdgpu_rreg_t gc_cac_rreg;
amdgpu_wreg_t gc_cac_wreg;
+ /* protects concurrent se_cac register access */
+ spinlock_t se_cac_idx_lock;
+ amdgpu_rreg_t se_cac_rreg;
+ amdgpu_wreg_t se_cac_wreg;
/* protects concurrent ENDPOINT (audio) register access */
spinlock_t audio_endpt_idx_lock;
amdgpu_block_rreg_t audio_endpt_rreg;
amdgpu_block_wreg_t audio_endpt_wreg;
- void __iomem *rio_mem;
- resource_size_t rio_mem_size;
struct amdgpu_doorbell doorbell;
/* clock/pll info */
struct amdgpu_clock clock;
/* MC */
- struct amdgpu_mc mc;
+ struct amdgpu_gmc gmc;
struct amdgpu_gart gart;
- struct amdgpu_dummy_page dummy_page;
+ dma_addr_t dummy_page_addr;
struct amdgpu_vm_manager vm_manager;
struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS];
+ DECLARE_BITMAP(vmhubs_mask, AMDGPU_MAX_VMHUBS);
/* memory management */
struct amdgpu_mman mman;
- struct amdgpu_vram_scratch vram_scratch;
+ struct amdgpu_mem_scratch mem_scratch;
struct amdgpu_wb wb;
- atomic64_t vram_usage;
- atomic64_t vram_vis_usage;
- atomic64_t gtt_usage;
atomic64_t num_bytes_moved;
atomic64_t num_evictions;
+ atomic64_t num_vram_cpu_page_faults;
atomic_t gpu_reset_counter;
+ atomic_t vram_lost_counter;
/* data for buffer migration throttling */
struct {
spinlock_t lock;
s64 last_update_us;
s64 accum_us; /* accumulated microseconds */
+ s64 accum_us_vis; /* for visible VRAM */
u32 log2_max_MBps;
} mm_stats;
/* display */
bool enable_virtual_display;
+ struct amdgpu_vkms_output *amdgpu_vkms_output;
struct amdgpu_mode_info mode_info;
- struct work_struct hotplug_work;
+ /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
+ struct delayed_work hotplug_work;
struct amdgpu_irq_src crtc_irq;
+ struct amdgpu_irq_src vline0_irq;
+ struct amdgpu_irq_src vupdate_irq;
struct amdgpu_irq_src pageflip_irq;
struct amdgpu_irq_src hpd_irq;
+ struct amdgpu_irq_src dmub_trace_irq;
+ struct amdgpu_irq_src dmub_outbox_irq;
/* rings */
u64 fence_context;
unsigned num_rings;
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
+ struct dma_fence __rcu *gang_submit;
bool ib_pool_ready;
- struct amdgpu_sa_manager ring_tmp_bo;
+ struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX];
+ struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
/* interrupts */
struct amdgpu_irq irq;
/* powerplay */
struct amd_powerplay powerplay;
- bool pp_enabled;
- bool pp_force_state_enabled;
-
- /* dpm */
struct amdgpu_pm pm;
- u32 cg_flags;
+ u64 cg_flags;
u32 pg_flags;
- /* amdgpu smumgr */
- struct amdgpu_smumgr smu;
+ /* nbio */
+ struct amdgpu_nbio nbio;
+
+ /* hdp */
+ struct amdgpu_hdp hdp;
+
+ /* smuio */
+ struct amdgpu_smuio smuio;
+
+ /* mmhub */
+ struct amdgpu_mmhub mmhub;
+
+ /* gfxhub */
+ struct amdgpu_gfxhub gfxhub;
/* gfx */
struct amdgpu_gfx gfx;
@@ -1570,12 +1116,28 @@ struct amdgpu_device {
/* sdma */
struct amdgpu_sdma sdma;
+ /* lsdma */
+ struct amdgpu_lsdma lsdma;
+
/* uvd */
struct amdgpu_uvd uvd;
/* vce */
struct amdgpu_vce vce;
+ /* vcn */
+ struct amdgpu_vcn vcn;
+
+ /* jpeg */
+ struct amdgpu_jpeg jpeg;
+
+ /* vpe */
+ struct amdgpu_vpe vpe;
+
+ /* umsch */
+ struct amdgpu_umsch_mm umsch_mm;
+ bool enable_umsch_mm;
+
/* firmwares */
struct amdgpu_firmware firmware;
@@ -1585,79 +1147,313 @@ struct amdgpu_device {
/* GDS */
struct amdgpu_gds gds;
+ /* for userq and VM fences */
+ struct amdgpu_seq64 seq64;
+
+ /* UMC */
+ struct amdgpu_umc umc;
+
+ /* display related functionality */
+ struct amdgpu_display_manager dm;
+
+#if defined(CONFIG_DRM_AMD_ISP)
+ /* isp */
+ struct amdgpu_isp isp;
+#endif
+
+ /* mes */
+ bool enable_mes;
+ bool enable_mes_kiq;
+ bool enable_uni_mes;
+ struct amdgpu_mes mes;
+ struct amdgpu_mqd mqds[AMDGPU_HW_IP_NUM];
+ const struct amdgpu_userq_funcs *userq_funcs[AMDGPU_HW_IP_NUM];
+
+ /* xarray used to retrieve the user queue fence driver reference
+ * in the EOP interrupt handler to signal the particular user
+ * queue fence.
+ */
+ struct xarray userq_xa;
+
+ /* df */
+ struct amdgpu_df df;
+
+ /* MCA */
+ struct amdgpu_mca mca;
+
+ /* ACA */
+ struct amdgpu_aca aca;
+
+ /* CPER */
+ struct amdgpu_cper cper;
+
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
+ uint32_t harvest_ip_mask;
int num_ip_blocks;
struct mutex mn_lock;
DECLARE_HASHTABLE(mn_hash, 7);
/* tracking pinned memory */
- u64 vram_pin_size;
- u64 invisible_pin_size;
- u64 gart_pin_size;
+ atomic64_t vram_pin_size;
+ atomic64_t visible_pin_size;
+ atomic64_t gart_pin_size;
- /* amdkfd interface */
- struct kfd_dev *kfd;
+ /* soc15 register offset based on ip, instance and segment */
+ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
+ struct amdgpu_ip_map_info ip_map;
- struct amdgpu_virt virt;
+ /* delayed work_func for deferring clockgating during resume */
+ struct delayed_work delayed_init_work;
- /* link all shadow bo */
- struct list_head shadow_list;
- struct mutex shadow_list_lock;
- /* link all gtt */
- spinlock_t gtt_list_lock;
- struct list_head gtt_list;
+ struct amdgpu_virt virt;
/* record hw reset is performed */
bool has_hw_reset;
+ u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];
+
+ /* s3/s4 mask */
+ bool in_suspend;
+ bool in_s3;
+ bool in_s4;
+ bool in_s0ix;
+ suspend_state_t last_suspend_state;
+
+ enum pp_mp1_state mp1_state;
+ struct amdgpu_doorbell_index doorbell_index;
+
+ struct mutex notifier_lock;
+
+ int asic_reset_res;
+ struct work_struct xgmi_reset_work;
+ struct list_head reset_list;
+
+ long gfx_timeout;
+ long sdma_timeout;
+ long video_timeout;
+ long compute_timeout;
+ long psp_timeout;
+
+ uint64_t unique_id;
+ uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
+
+ /* enable runtime pm on the device */
+ bool in_runpm;
+ bool has_pr3;
+ bool ucode_sysfs_en;
+
+ struct amdgpu_fru_info *fru_info;
+ atomic_t throttling_logging_enabled;
+ struct ratelimit_state throttling_logging_rs;
+ uint32_t ras_hw_enabled;
+ uint32_t ras_enabled;
+ bool ras_default_ecc_enabled;
+
+ bool no_hw_access;
+ struct pci_saved_state *pci_state;
+ pci_channel_state_t pci_channel_state;
+
+ struct amdgpu_pcie_reset_ctx pcie_reset_ctx;
+
+ /* Track auto wait count on s_barrier settings */
+ bool barrier_has_auto_waitcnt;
+
+ struct amdgpu_reset_control *reset_cntl;
+ uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE];
+
+ bool ram_is_direct_mapped;
+
+ struct list_head ras_list;
+
+ struct ip_discovery_top *ip_top;
+
+ struct amdgpu_reset_domain *reset_domain;
+
+ struct mutex benchmark_mutex;
+
+ bool scpm_enabled;
+ uint32_t scpm_status;
+
+ struct work_struct reset_work;
+
+ bool dc_enabled;
+ /* Mask of active clusters */
+ uint32_t aid_mask;
+
+ /* Debug */
+ bool debug_vm;
+ bool debug_largebar;
+ bool debug_disable_soft_recovery;
+ bool debug_use_vram_fw_buf;
+ bool debug_enable_ras_aca;
+ bool debug_exp_resets;
+ bool debug_disable_gpu_ring_reset;
+ bool debug_vm_userptr;
+ bool debug_disable_ce_logs;
+
+ /* Protection for the following isolation structure */
+ struct mutex enforce_isolation_mutex;
+ enum amdgpu_enforce_isolation_mode enforce_isolation[MAX_XCP];
+ struct amdgpu_isolation {
+ void *owner;
+ struct dma_fence *spearhead;
+ struct amdgpu_sync active;
+ struct amdgpu_sync prev;
+ } isolation[MAX_XCP];
+
+ struct amdgpu_init_level *init_lvl;
+
+ /* This flag is used to determine how VRAM allocations are handled for APUs
+ * in KFD: VRAM or GTT.
+ */
+ bool apu_prefer_gtt;
+
+ struct list_head userq_mgr_list;
+ struct mutex userq_mutex;
+ bool userq_halt_for_enforce_isolation;
+ struct amdgpu_uid *uid_info;
+
+ /* KFD
+	 * Must be last -- ends in a flexible-array member.
+ */
+ struct amdgpu_kfd_dev kfd;
};
-static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
+ uint8_t ip, uint8_t inst)
+{
+ /* This considers only major/minor/rev and ignores
+ * subrevision/variant fields.
+ */
+ return adev->ip_versions[ip][inst] & ~0xFFU;
+}
+
+static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev,
+ uint8_t ip, uint8_t inst)
+{
+ /* This returns full version - major/minor/rev/variant/subrevision */
+ return adev->ip_versions[ip][inst];
+}
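/*
 * Example (illustrative sketch, not part of this header): version checks
 * normally compare only the major/minor/rev part, so amdgpu_ip_version()
 * (which masks the variant and subrevision) is matched against IP_VERSION().
 * setup_multi_xcc_path() is a hypothetical helper.
 *
 *	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 3))
 *		setup_multi_xcc_path(adev);
 */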
+
+static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
+{
+ return container_of(ddev, struct amdgpu_device, ddev);
+}
+
+static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
+{
+ return &adev->ddev;
+}
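/*
 * Example (illustrative sketch, not part of this header): since the DRM device
 * is embedded in struct amdgpu_device, DRM callbacks convert between the two
 * with the helpers above instead of keeping a separate pointer. The callback
 * name is hypothetical.
 *
 *	static int soc_drm_callback(struct drm_device *dev)
 *	{
 *		struct amdgpu_device *adev = drm_to_adev(dev);
 *
 *		return adev->accel_working ? 0 : -ENODEV;
 *	}
 */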
+
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
{
return container_of(bdev, struct amdgpu_device, mman.bdev);
}
-bool amdgpu_device_is_px(struct drm_device *dev);
+static inline bool amdgpu_is_multi_aid(struct amdgpu_device *adev)
+{
+ return !!adev->aid_mask;
+}
+
int amdgpu_device_init(struct amdgpu_device *adev,
- struct drm_device *ddev,
- struct pci_dev *pdev,
uint32_t flags);
-void amdgpu_device_fini(struct amdgpu_device *adev);
+void amdgpu_device_fini_hw(struct amdgpu_device *adev);
+void amdgpu_device_fini_sw(struct amdgpu_device *adev);
+
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
+void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
+ void *buf, size_t size, bool write);
+size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
+ void *buf, size_t size, bool write);
+
+void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
+ void *buf, size_t size, bool write);
+uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
+ uint32_t inst, uint32_t reg_addr, char reg_name[],
+ uint32_t expected_value, uint32_t mask);
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t acc_flags);
+u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
+ u64 reg_addr);
+uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t acc_flags,
+ uint32_t xcc_id);
+void amdgpu_device_wreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t v,
uint32_t acc_flags);
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags);
-u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
-void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
-
-u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
-u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
-void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
+void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
+ u64 reg_addr, u32 reg_data);
+void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t v,
+ uint32_t acc_flags,
+ uint32_t xcc_id);
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t v, uint32_t xcc_id);
+void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
+uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
+
+u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
+ u32 reg_addr);
+u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
+ u32 reg_addr);
+u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
+ u64 reg_addr);
+void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
+ u32 reg_addr, u32 reg_data);
+void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
+ u32 reg_addr, u64 reg_data);
+void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
+ u64 reg_addr, u64 reg_data);
+u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+ enum amd_asic_type asic_type);
+bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
+
+void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
+
+int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context);
+
+int amdgpu_do_asic_reset(struct list_head *device_list_handle,
+ struct amdgpu_reset_context *reset_context);
+
+int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context);
+
+int emu_soc_asic_init(struct amdgpu_device *adev);
/*
* Registers read & write functions.
*/
-
-#define AMDGPU_REGS_IDX (1<<0)
#define AMDGPU_REGS_NO_KIQ (1<<1)
+#define AMDGPU_REGS_RLC (1<<2)
+
+#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
+#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
-#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
-#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg), 0)
+#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v), 0)
-#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
-#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
-#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
-#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
+#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
+#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
+
+#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
+#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
+#define RREG32_XCC(reg, inst) amdgpu_device_xcc_rreg(adev, (reg), 0, inst)
+#define WREG32_XCC(reg, v, inst) amdgpu_device_xcc_wreg(adev, (reg), (v), 0, inst)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
+#define RREG32_PCIE_EXT(reg) adev->pcie_rreg_ext(adev, (reg))
+#define WREG32_PCIE_EXT(reg, v) adev->pcie_wreg_ext(adev, (reg), (v))
+#define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg))
+#define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v))
+#define RREG64_PCIE_EXT(reg) adev->pcie_rreg64_ext(adev, (reg))
+#define WREG64_PCIE_EXT(reg, v) adev->pcie_wreg64_ext(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
@@ -1666,6 +1462,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
+#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
+#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask) \
@@ -1684,14 +1482,16 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
tmp_ |= ((val) & ~(mask)); \
WREG32_PLL(reg, tmp_); \
} while (0)
-#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
-#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
-#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
-#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
-#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
-#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
-#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
+#define WREG32_SMC_P(_Reg, _Val, _Mask) \
+ do { \
+ u32 tmp = RREG32_SMC(_Reg); \
+ tmp &= (_Mask); \
+ tmp |= ((_Val) & ~(_Mask)); \
+ WREG32_SMC(_Reg, tmp); \
+ } while (0)
+
+#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
@@ -1709,6 +1509,7 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
#define WREG32_FIELD_OFFSET(reg, offset, field, val) \
WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
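+/* Extract the bit field [h:l] (inclusive) from x. */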
+#define AMDGPU_GET_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> (l))
/*
* BIOS helpers.
*/
@@ -1717,69 +1518,12 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
/*
- * RING helpers.
- */
-static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
-{
- if (ring->count_dw <= 0)
- DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
- ring->ring[ring->wptr++ & ring->buf_mask] = v;
- ring->wptr &= ring->ptr_mask;
- ring->count_dw--;
-}
-
-static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring, void *src, int count_dw)
-{
- unsigned occupied, chunk1, chunk2;
- void *dst;
-
- if (ring->count_dw < count_dw) {
- DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
- } else {
- occupied = ring->wptr & ring->buf_mask;
- dst = (void *)&ring->ring[occupied];
- chunk1 = ring->buf_mask + 1 - occupied;
- chunk1 = (chunk1 >= count_dw) ? count_dw: chunk1;
- chunk2 = count_dw - chunk1;
- chunk1 <<= 2;
- chunk2 <<= 2;
-
- if (chunk1)
- memcpy(dst, src, chunk1);
-
- if (chunk2) {
- src += chunk1;
- dst = (void *)ring->ring;
- memcpy(dst, src, chunk2);
- }
-
- ring->wptr += count_dw;
- ring->wptr &= ring->ptr_mask;
- ring->count_dw -= count_dw;
- }
-}
-
-static inline struct amdgpu_sdma_instance *
-amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- int i;
-
- for (i = 0; i < adev->sdma.num_instances; i++)
- if (&adev->sdma.instance[i].ring == ring)
- break;
-
- if (i < AMDGPU_MAX_SDMA_INSTANCES)
- return &adev->sdma.instance[i];
- else
- return NULL;
-}
-
-/*
* ASICs macro.
*/
-#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
+#define amdgpu_asic_set_vga_state(adev, state) \
+ ((adev)->asic_funcs->set_vga_state ? (adev)->asic_funcs->set_vga_state((adev), (state)) : 0)
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
+#define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
@@ -1790,107 +1534,93 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
-#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
-#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
-#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
-#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
-#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
-#define amdgpu_vm_get_pte_flags(adev, flags) (adev)->gart.gart_funcs->get_vm_pte_flags((adev),(flags))
-#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
-#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
-#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
-#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
-#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
-#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
-#define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c))
-#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
-#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
-#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
-#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
-#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
-#define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r))
-#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
-#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
-#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
-#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
-#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
-#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
-#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
-#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
-#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
-#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
-#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
-#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
-#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
-#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
-#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
-#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
-#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
-#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
-#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
-#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
-#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
-#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
-#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
-#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
-#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
-#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
-#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
-#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
-#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
-#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
-#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
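+/* Prefer the ASIC-level HDP callbacks; fall back to the HDP IP block implementation when they are not provided. */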
+#define amdgpu_asic_flush_hdp(adev, r) \
+ ((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r)))
+#define amdgpu_asic_invalidate_hdp(adev, r) \
+ ((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : \
+ ((adev)->hdp.funcs->invalidate_hdp ? (adev)->hdp.funcs->invalidate_hdp((adev), (r)) : (void)0))
+#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
+#define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev))
+#define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1)))
+#define amdgpu_asic_need_reset_on_init(adev) (adev)->asic_funcs->need_reset_on_init((adev))
+#define amdgpu_asic_get_pcie_replay_count(adev) ((adev)->asic_funcs->get_pcie_replay_count((adev)))
+#define amdgpu_asic_supports_baco(adev) (adev)->asic_funcs->supports_baco((adev))
+#define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev))
+#define amdgpu_asic_update_umd_stable_pstate(adev, enter) \
+ ((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0)
+#define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c))
+
+#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter))
+
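+/* Iterate over every set bit of inst_mask from lowest to highest; i is the zero-based instance index. */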
+#define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 0 : ~0UL << (i))
+#define for_each_inst(i, inst_mask) \
+ for (i = ffs(inst_mask); i-- != 0; \
+ i = ffs(inst_mask & BIT_MASK_UPPER(i + 1)))
/* Common functions */
-int amdgpu_gpu_reset(struct amdgpu_device *adev);
-bool amdgpu_need_backup(struct amdgpu_device *adev);
-void amdgpu_pci_config_reset(struct amdgpu_device *adev);
-bool amdgpu_need_post(struct amdgpu_device *adev);
-void amdgpu_update_display_priority(struct amdgpu_device *adev);
-
-int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
-int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
- u32 ip_instance, u32 ring,
- struct amdgpu_ring **out_ring);
-void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
-int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
-int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
- uint32_t flags);
-bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
-struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
-bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
- unsigned long end);
-bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
- int *last_invalidated);
-bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
-uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
- struct ttm_mem_reg *mem);
-void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
-void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
-int amdgpu_ttm_init(struct amdgpu_device *adev);
-void amdgpu_ttm_fini(struct amdgpu_device *adev);
-void amdgpu_program_register_sequence(struct amdgpu_device *adev,
+bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
+bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context);
+void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+int amdgpu_device_pci_reset(struct amdgpu_device *adev);
+bool amdgpu_device_need_post(struct amdgpu_device *adev);
+bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev);
+bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
+
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
+ u64 num_vis_bytes);
+int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
+void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size);
-bool amdgpu_device_is_px(struct drm_device *dev);
+int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
+int amdgpu_device_link_reset(struct amdgpu_device *adev);
+bool amdgpu_device_supports_atpx(struct amdgpu_device *adev);
+bool amdgpu_device_supports_px(struct amdgpu_device *adev);
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev);
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev);
+int amdgpu_device_supports_baco(struct amdgpu_device *adev);
+void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
+bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev);
+int amdgpu_device_baco_enter(struct amdgpu_device *adev);
+int amdgpu_device_baco_exit(struct amdgpu_device *adev);
+
+void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+
+void amdgpu_device_halt(struct amdgpu_device *adev);
+u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
+ u32 reg);
+void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
+ u32 reg, u32 v);
+struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev);
+struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
+ struct dma_fence *gang);
+struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_job *job);
+bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev);
+ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring);
+ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset);
+
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
-bool amdgpu_atpx_dgpu_req_power_for_displays(void);
bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
-static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
static inline bool amdgpu_has_atpx(void) { return false; }
#endif
@@ -1900,24 +1630,23 @@ static inline bool amdgpu_has_atpx(void) { return false; }
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;
-int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
+int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
-void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv);
-int amdgpu_suspend(struct amdgpu_device *adev);
-int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
-int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
-int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
-void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
-int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
- int *max_error,
- struct timeval *vblank_time,
- unsigned flags);
-long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
+void amdgpu_driver_release_kms(struct drm_device *dev);
+
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
+int amdgpu_device_prepare(struct drm_device *dev);
+void amdgpu_device_complete(struct drm_device *dev);
+int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
+int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
+u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
+int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
+void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
+int amdgpu_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
/*
* functions used by amdgpu_encoder.c
@@ -1939,22 +1668,141 @@ struct amdgpu_afmt_acr {
struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
/* amdgpu_acpi.c */
+
+struct amdgpu_numa_info {
+ uint64_t size;
+ int pxm;
+ int nid;
+};
+
+/* ATCS Device/Driver State */
+#define AMDGPU_ATCS_PSC_DEV_STATE_D0 0
+#define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT 3
+#define AMDGPU_ATCS_PSC_DRV_STATE_OPR 0
+#define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR 1
+
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
+bool amdgpu_acpi_is_power_shift_control_supported(void);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise);
+int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
+ u8 dev_state, bool drv_state);
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
+int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
+ u64 *tmr_size);
+int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
+ struct amdgpu_numa_info *numa_info);
+
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev);
+void amdgpu_acpi_detect(void);
+void amdgpu_acpi_release(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
+static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev,
+ u64 *tmr_offset, u64 *tmr_size)
+{
+ return -EINVAL;
+}
+static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev,
+ int xcc_id,
+ struct amdgpu_numa_info *numa_info)
+{
+ return -EINVAL;
+}
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
+static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; }
+static inline void amdgpu_acpi_detect(void) { }
+static inline void amdgpu_acpi_release(void) { }
+static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
+static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
+ u8 dev_state, bool drv_state) { return 0; }
+static inline int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state)
+{
+ return 0;
+}
+static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
+#endif
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+#else
+static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
#endif
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
- uint64_t addr, struct amdgpu_bo **bo);
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
+#if defined(CONFIG_DRM_AMD_ISP)
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev);
+#endif
+
+void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
+void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev);
+
+pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state);
+pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev);
+pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev);
+void amdgpu_pci_resume(struct pci_dev *pdev);
+
+bool amdgpu_device_cache_pci_state(struct pci_dev *pdev);
+bool amdgpu_device_load_pci_state(struct pci_dev *pdev);
+
+bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev);
+
+int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
+ enum amd_powergating_state state);
+
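+/* Job timeouts only take effect when GPU recovery is enabled and no engine is configured with an infinite timeout. */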
+static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev)
+{
+ return amdgpu_gpu_recovery != 0 &&
+ adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT &&
+ adev->compute_timeout != MAX_SCHEDULE_TIMEOUT &&
+ adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT &&
+ adev->video_timeout != MAX_SCHEDULE_TIMEOUT;
+}
#include "amdgpu_object.h"
+
+static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
+{
+ return adev->gmc.tmz_enabled;
+}
+
+int amdgpu_in_reset(struct amdgpu_device *adev);
+
+extern const struct attribute_group amdgpu_vram_mgr_attr_group;
+extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
+extern const struct attribute_group amdgpu_flash_attr_group;
+
+void amdgpu_set_init_level(struct amdgpu_device *adev,
+ enum amdgpu_init_lvl_id lvl);
+
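+/* Read PCI_COMMAND to verify the device is still reachable; a failed read or an all-ones value means the device dropped off the bus. */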
+static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
+{
+ u32 status;
+ int r;
+
+ r = pci_read_config_dword(adev->pdev, PCI_COMMAND, &status);
+ if (r || PCI_POSSIBLE_ERROR(status)) {
+ dev_err(adev->dev, "device lost from bus!");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid);
+uint64_t amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
new file mode 100644
index 000000000000..9b3180449150
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -0,0 +1,984 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/list.h>
+#include "amdgpu.h"
+#include "amdgpu_aca.h"
+#include "amdgpu_ras.h"
+
+#define ACA_BANK_HWID(type, hwid, mcatype) [ACA_HWIP_TYPE_##type] = {hwid, mcatype}
+
+typedef int bank_handler_t(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data);
+
+static struct aca_hwip aca_hwid_mcatypes[ACA_HWIP_TYPE_COUNT] = {
+ ACA_BANK_HWID(SMU, 0x01, 0x01),
+ ACA_BANK_HWID(PCS_XGMI, 0x50, 0x00),
+ ACA_BANK_HWID(UMC, 0x96, 0x00),
+};
+
+static void aca_banks_init(struct aca_banks *banks)
+{
+ if (!banks)
+ return;
+
+ memset(banks, 0, sizeof(*banks));
+ INIT_LIST_HEAD(&banks->list);
+}
+
+static int aca_banks_add_bank(struct aca_banks *banks, struct aca_bank *bank)
+{
+ struct aca_bank_node *node;
+
+ if (!bank)
+ return -EINVAL;
+
+ node = kvzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ memcpy(&node->bank, bank, sizeof(*bank));
+
+ INIT_LIST_HEAD(&node->node);
+ list_add_tail(&node->node, &banks->list);
+
+ banks->nr_banks++;
+
+ return 0;
+}
+
+static void aca_banks_release(struct aca_banks *banks)
+{
+ struct aca_bank_node *node, *tmp;
+
+ if (list_empty(&banks->list))
+ return;
+
+ list_for_each_entry_safe(node, tmp, &banks->list, node) {
+ list_del(&node->node);
+ kvfree(node);
+ banks->nr_banks--;
+ }
+}
+
+static int aca_smu_get_valid_aca_count(struct amdgpu_device *adev, enum aca_smu_type type, u32 *count)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
+
+ if (!count)
+ return -EINVAL;
+
+ if (!smu_funcs || !smu_funcs->get_valid_aca_count)
+ return -EOPNOTSUPP;
+
+ return smu_funcs->get_valid_aca_count(adev, type, count);
+}
+
+static struct aca_regs_dump {
+ const char *name;
+ int reg_idx;
+} aca_regs[] = {
+ {"CONTROL", ACA_REG_IDX_CTL},
+ {"STATUS", ACA_REG_IDX_STATUS},
+ {"ADDR", ACA_REG_IDX_ADDR},
+ {"MISC", ACA_REG_IDX_MISC0},
+ {"CONFIG", ACA_REG_IDX_CONFIG},
+ {"IPID", ACA_REG_IDX_IPID},
+ {"SYND", ACA_REG_IDX_SYND},
+ {"DESTAT", ACA_REG_IDX_DESTAT},
+ {"DEADDR", ACA_REG_IDX_DEADDR},
+ {"CONTROL_MASK", ACA_REG_IDX_CTL_MASK},
+};
+
+static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, struct aca_bank *bank,
+ struct ras_query_context *qctx)
+{
+ u64 event_id = qctx ? qctx->evid.event_id : RAS_EVENT_INVALID_ID;
+ int i;
+
+ if (adev->debug_disable_ce_logs &&
+ bank->smu_err_type == ACA_SMU_TYPE_CE &&
+ !ACA_BANK_ERR_IS_DEFFERED(bank))
+ return;
+
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
+	/* print one-based indices for readability, e.g. ACA[08/08]: xxxx */
+ for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "ACA[%02d/%02d].%s=0x%016llx\n",
+ idx + 1, total, aca_regs[i].name, bank->regs[aca_regs[i].reg_idx]);
+
+ if (ACA_REG__STATUS__SCRUB(bank->regs[ACA_REG_IDX_STATUS]))
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "hardware error logged by the scrubber\n");
+}
+
+static bool aca_bank_hwip_is_matched(struct aca_bank *bank, enum aca_hwip_type type)
+{
+ struct aca_hwip *hwip;
+ int hwid, mcatype;
+ u64 ipid;
+
+ if (!bank || type == ACA_HWIP_TYPE_UNKNOW)
+ return false;
+
+ hwip = &aca_hwid_mcatypes[type];
+ if (!hwip->hwid)
+ return false;
+
+ ipid = bank->regs[ACA_REG_IDX_IPID];
+ hwid = ACA_REG__IPID__HARDWAREID(ipid);
+ mcatype = ACA_REG__IPID__MCATYPE(ipid);
+
+ return hwip->hwid == hwid && hwip->mcatype == mcatype;
+}
+
+static int aca_smu_get_valid_aca_banks(struct amdgpu_device *adev, enum aca_smu_type type,
+ int start, int count,
+ struct aca_banks *banks, struct ras_query_context *qctx)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
+ struct aca_bank bank;
+ int i, max_count, ret;
+
+ if (!count)
+ return 0;
+
+ if (!smu_funcs || !smu_funcs->get_valid_aca_bank)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ max_count = smu_funcs->max_ue_bank_count;
+ break;
+ case ACA_SMU_TYPE_CE:
+ max_count = smu_funcs->max_ce_bank_count;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (start + count > max_count)
+ return -EINVAL;
+
+ count = min_t(int, count, max_count);
+ for (i = 0; i < count; i++) {
+ memset(&bank, 0, sizeof(bank));
+ ret = smu_funcs->get_valid_aca_bank(adev, type, start + i, &bank);
+ if (ret)
+ return ret;
+
+ bank.smu_err_type = type;
+
+ /*
+		 * Poison consumed while a UE is injected under background
+		 * workloads is unexpected; skip such banks unless they were
+		 * reported by the UMC.
+ */
+ if (type == ACA_SMU_TYPE_UE &&
+ ACA_REG__STATUS__POISON(bank.regs[ACA_REG_IDX_STATUS]) &&
+ !aca_bank_hwip_is_matched(&bank, ACA_HWIP_TYPE_UMC))
+ continue;
+
+ aca_smu_bank_dump(adev, i, count, &bank, qctx);
+
+ ret = aca_banks_add_bank(banks, &bank);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type)
+{
+ const struct aca_bank_ops *bank_ops = handle->bank_ops;
+
+ /* Parse all deferred errors with UMC aca handle */
+ if (ACA_BANK_ERR_IS_DEFFERED(bank))
+ return handle->hwip == ACA_HWIP_TYPE_UMC;
+
+ if (!aca_bank_hwip_is_matched(bank, handle->hwip))
+ return false;
+
+ if (!bank_ops->aca_bank_is_valid)
+ return true;
+
+ return bank_ops->aca_bank_is_valid(handle, bank, type, handle->data);
+}
+
+static struct aca_bank_error *new_bank_error(struct aca_error *aerr, struct aca_bank_info *info)
+{
+ struct aca_bank_error *bank_error;
+
+ bank_error = kvzalloc(sizeof(*bank_error), GFP_KERNEL);
+ if (!bank_error)
+ return NULL;
+
+ INIT_LIST_HEAD(&bank_error->node);
+ memcpy(&bank_error->info, info, sizeof(*info));
+
+ mutex_lock(&aerr->lock);
+ list_add_tail(&bank_error->node, &aerr->list);
+ aerr->nr_errors++;
+ mutex_unlock(&aerr->lock);
+
+ return bank_error;
+}
+
+static struct aca_bank_error *find_bank_error(struct aca_error *aerr, struct aca_bank_info *info)
+{
+ struct aca_bank_error *bank_error = NULL;
+ struct aca_bank_info *tmp_info;
+ bool found = false;
+
+ mutex_lock(&aerr->lock);
+ list_for_each_entry(bank_error, &aerr->list, node) {
+ tmp_info = &bank_error->info;
+ if (tmp_info->socket_id == info->socket_id &&
+ tmp_info->die_id == info->die_id) {
+ found = true;
+ goto out_unlock;
+ }
+ }
+
+out_unlock:
+ mutex_unlock(&aerr->lock);
+
+ return found ? bank_error : NULL;
+}
+
+static void aca_bank_error_remove(struct aca_error *aerr, struct aca_bank_error *bank_error)
+{
+ if (!aerr || !bank_error)
+ return;
+
+ list_del(&bank_error->node);
+ aerr->nr_errors--;
+
+ kvfree(bank_error);
+}
+
+static struct aca_bank_error *get_bank_error(struct aca_error *aerr, struct aca_bank_info *info)
+{
+ struct aca_bank_error *bank_error;
+
+ if (!aerr || !info)
+ return NULL;
+
+ bank_error = find_bank_error(aerr, info);
+ if (bank_error)
+ return bank_error;
+
+ return new_bank_error(aerr, info);
+}
+
+int aca_error_cache_log_bank_error(struct aca_handle *handle, struct aca_bank_info *info,
+ enum aca_error_type type, u64 count)
+{
+ struct aca_error_cache *error_cache = &handle->error_cache;
+ struct aca_bank_error *bank_error;
+ struct aca_error *aerr;
+
+ if (!handle || !info || type >= ACA_ERROR_TYPE_COUNT)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ aerr = &error_cache->errors[type];
+ bank_error = get_bank_error(aerr, info);
+ if (!bank_error)
+ return -ENOMEM;
+
+ bank_error->count += count;
+
+ return 0;
+}
+
+static int aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type)
+{
+ const struct aca_bank_ops *bank_ops = handle->bank_ops;
+
+ if (!bank)
+ return -EINVAL;
+
+ if (!bank_ops->aca_bank_parser)
+ return -EOPNOTSUPP;
+
+ return bank_ops->aca_bank_parser(handle, bank, type,
+ handle->data);
+}
+
+static int handler_aca_log_bank_error(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ int ret;
+
+ ret = aca_bank_parser(handle, bank, type);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int aca_dispatch_bank(struct aca_handle_manager *mgr, struct aca_bank *bank,
+ enum aca_smu_type type, bank_handler_t handler, void *data)
+{
+ struct aca_handle *handle;
+ int ret;
+
+ if (list_empty(&mgr->list))
+ return 0;
+
+ list_for_each_entry(handle, &mgr->list, node) {
+ if (!aca_bank_is_valid(handle, bank, type))
+ continue;
+
+ ret = handler(handle, bank, type, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int aca_dispatch_banks(struct aca_handle_manager *mgr, struct aca_banks *banks,
+ enum aca_smu_type type, bank_handler_t handler, void *data)
+{
+ struct aca_bank_node *node;
+ struct aca_bank *bank;
+ int ret;
+
+ if (!mgr || !banks)
+ return -EINVAL;
+
+	/* pre-check to avoid unnecessary work */
+ if (list_empty(&mgr->list) || list_empty(&banks->list))
+ return 0;
+
+ list_for_each_entry(node, &banks->list, node) {
+ bank = &node->bank;
+
+ ret = aca_dispatch_bank(mgr, bank, type, handler, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool aca_bank_should_update(struct amdgpu_device *adev, enum aca_smu_type type)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ bool ret = true;
+
+ /*
+	 * The UE valid MCA count is only cleared after reset, so to avoid
+	 * counting the same errors twice, the aca banks are only updated
+	 * once during the gpu recovery stage.
+ */
+ if (type == ACA_SMU_TYPE_UE) {
+ if (amdgpu_ras_intr_triggered())
+ ret = atomic_cmpxchg(&aca->ue_update_flag, 0, 1) == 0;
+ else
+ atomic_set(&aca->ue_update_flag, 0);
+ }
+
+ return ret;
+}
+
+static void aca_banks_generate_cper(struct amdgpu_device *adev,
+ enum aca_smu_type type,
+ struct aca_banks *banks,
+ int count)
+{
+ struct aca_bank_node *node;
+ struct aca_bank *bank;
+ int r;
+
+ if (!adev->cper.enabled)
+ return;
+
+ if (!banks || !count) {
+ dev_warn(adev->dev, "fail to generate cper records\n");
+ return;
+ }
+
+ /* UEs must be encoded into separate CPER entries */
+ if (type == ACA_SMU_TYPE_UE) {
+ struct aca_banks de_banks;
+
+ aca_banks_init(&de_banks);
+ list_for_each_entry(node, &banks->list, node) {
+ bank = &node->bank;
+ if (bank->aca_err_type == ACA_ERROR_TYPE_DEFERRED) {
+ r = aca_banks_add_bank(&de_banks, bank);
+ if (r)
+ dev_warn(adev->dev, "fail to add de banks, ret = %d\n", r);
+ } else {
+ if (amdgpu_cper_generate_ue_record(adev, bank))
+ dev_warn(adev->dev, "fail to generate ue cper records\n");
+ }
+ }
+
+ if (!list_empty(&de_banks.list)) {
+ if (amdgpu_cper_generate_ce_records(adev, &de_banks, de_banks.nr_banks))
+ dev_warn(adev->dev, "fail to generate de cper records\n");
+ }
+
+ aca_banks_release(&de_banks);
+ } else {
+ /*
+		 * SMU_TYPE_CE banks are combined into a single CPER entry;
+		 * they may contain CEs, DEs, or both.
+ */
+ if (amdgpu_cper_generate_ce_records(adev, banks, count))
+ dev_warn(adev->dev, "fail to generate ce cper records\n");
+ }
+}
+
+static int aca_banks_update(struct amdgpu_device *adev, enum aca_smu_type type,
+ bank_handler_t handler, struct ras_query_context *qctx, void *data)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ struct aca_banks banks;
+ u32 count = 0;
+ int ret;
+
+ if (list_empty(&aca->mgr.list))
+ return 0;
+
+ if (!aca_bank_should_update(adev, type))
+ return 0;
+
+ ret = aca_smu_get_valid_aca_count(adev, type, &count);
+ if (ret)
+ return ret;
+
+ if (!count)
+ return 0;
+
+ aca_banks_init(&banks);
+
+ ret = aca_smu_get_valid_aca_banks(adev, type, 0, count, &banks, qctx);
+ if (ret)
+ goto err_release_banks;
+
+ if (list_empty(&banks.list)) {
+ ret = 0;
+ goto err_release_banks;
+ }
+
+ ret = aca_dispatch_banks(&aca->mgr, &banks, type,
+ handler, data);
+ if (ret)
+ goto err_release_banks;
+
+ aca_banks_generate_cper(adev, type, &banks, count);
+
+err_release_banks:
+ aca_banks_release(&banks);
+
+ return ret;
+}
+
+static int aca_log_aca_error_data(struct aca_bank_error *bank_error, enum aca_error_type type, struct ras_err_data *err_data)
+{
+ struct aca_bank_info *info;
+ struct amdgpu_smuio_mcm_config_info mcm_info;
+ u64 count;
+
+ if (type >= ACA_ERROR_TYPE_COUNT)
+ return -EINVAL;
+
+ count = bank_error->count;
+ if (!count)
+ return 0;
+
+ info = &bank_error->info;
+ mcm_info.die_id = info->die_id;
+ mcm_info.socket_id = info->socket_id;
+
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, count);
+ break;
+ case ACA_ERROR_TYPE_CE:
+ amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, count);
+ break;
+ case ACA_ERROR_TYPE_DEFERRED:
+ amdgpu_ras_error_statistic_de_count(err_data, &mcm_info, count);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int aca_log_aca_error(struct aca_handle *handle, enum aca_error_type type, struct ras_err_data *err_data)
+{
+ struct aca_error_cache *error_cache = &handle->error_cache;
+ struct aca_error *aerr = &error_cache->errors[type];
+ struct aca_bank_error *bank_error, *tmp;
+
+ mutex_lock(&aerr->lock);
+
+ if (list_empty(&aerr->list))
+ goto out_unlock;
+
+ list_for_each_entry_safe(bank_error, tmp, &aerr->list, node) {
+ aca_log_aca_error_data(bank_error, type, err_data);
+ aca_bank_error_remove(aerr, bank_error);
+ }
+
+out_unlock:
+ mutex_unlock(&aerr->lock);
+
+ return 0;
+}
+
+static int __aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle, enum aca_error_type type,
+ struct ras_err_data *err_data, struct ras_query_context *qctx)
+{
+ enum aca_smu_type smu_type;
+ int ret;
+
+ switch (type) {
+ case ACA_ERROR_TYPE_UE:
+ smu_type = ACA_SMU_TYPE_UE;
+ break;
+ case ACA_ERROR_TYPE_CE:
+ case ACA_ERROR_TYPE_DEFERRED:
+ smu_type = ACA_SMU_TYPE_CE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+	/* update the aca banks into the error caches first */
+ ret = aca_banks_update(adev, smu_type, handler_aca_log_bank_error, qctx, NULL);
+ if (ret)
+ return ret;
+
+	/* DEs may be reported together with CEs or UEs */
+ if (type != ACA_ERROR_TYPE_DEFERRED)
+ aca_log_aca_error(handle, ACA_ERROR_TYPE_DEFERRED, err_data);
+
+ return aca_log_aca_error(handle, type, err_data);
+}
+
+static bool aca_handle_is_valid(struct aca_handle *handle)
+{
+ if (!handle->mask || !list_empty(&handle->node))
+ return false;
+
+ return true;
+}
+
+int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle,
+ enum aca_error_type type, struct ras_err_data *err_data,
+ struct ras_query_context *qctx)
+{
+ if (!handle || !err_data)
+ return -EINVAL;
+
+ if (aca_handle_is_valid(handle))
+ return -EOPNOTSUPP;
+
+ if ((type < 0) || (!(BIT(type) & handle->mask)))
+ return 0;
+
+ return __aca_get_error_data(adev, handle, type, err_data, qctx);
+}
+
+static void aca_error_init(struct aca_error *aerr, enum aca_error_type type)
+{
+ mutex_init(&aerr->lock);
+ INIT_LIST_HEAD(&aerr->list);
+ aerr->type = type;
+ aerr->nr_errors = 0;
+}
+
+static void aca_init_error_cache(struct aca_handle *handle)
+{
+ struct aca_error_cache *error_cache = &handle->error_cache;
+ int type;
+
+ for (type = ACA_ERROR_TYPE_UE; type < ACA_ERROR_TYPE_COUNT; type++)
+ aca_error_init(&error_cache->errors[type], type);
+}
+
+static void aca_error_fini(struct aca_error *aerr)
+{
+ struct aca_bank_error *bank_error, *tmp;
+
+ mutex_lock(&aerr->lock);
+ if (list_empty(&aerr->list))
+ goto out_unlock;
+
+ list_for_each_entry_safe(bank_error, tmp, &aerr->list, node)
+ aca_bank_error_remove(aerr, bank_error);
+
+out_unlock:
+ mutex_destroy(&aerr->lock);
+}
+
+static void aca_fini_error_cache(struct aca_handle *handle)
+{
+ struct aca_error_cache *error_cache = &handle->error_cache;
+ int type;
+
+ for (type = ACA_ERROR_TYPE_UE; type < ACA_ERROR_TYPE_COUNT; type++)
+ aca_error_fini(&error_cache->errors[type]);
+}
+
+static int add_aca_handle(struct amdgpu_device *adev, struct aca_handle_manager *mgr, struct aca_handle *handle,
+ const char *name, const struct aca_info *ras_info, void *data)
+{
+ memset(handle, 0, sizeof(*handle));
+
+ handle->adev = adev;
+ handle->mgr = mgr;
+ handle->name = name;
+ handle->hwip = ras_info->hwip;
+ handle->mask = ras_info->mask;
+ handle->bank_ops = ras_info->bank_ops;
+ handle->data = data;
+ aca_init_error_cache(handle);
+
+ INIT_LIST_HEAD(&handle->node);
+ list_add_tail(&handle->node, &mgr->list);
+ mgr->nr_handles++;
+
+ return 0;
+}
+
+static ssize_t aca_sysfs_read(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct aca_handle *handle = container_of(attr, struct aca_handle, aca_attr);
+
+	/*
+	 * NOTE: The ACA cache is cleared automatically once it is read, so the
+	 * driver should keep a single query entry point and forward requests
+	 * to the RAS query interface directly.
+	 */
+ return amdgpu_ras_aca_sysfs_read(dev, attr, handle, buf, handle->data);
+}
+
+static int add_aca_sysfs(struct amdgpu_device *adev, struct aca_handle *handle)
+{
+ struct device_attribute *aca_attr = &handle->aca_attr;
+
+ snprintf(handle->attr_name, sizeof(handle->attr_name) - 1, "aca_%s", handle->name);
+ aca_attr->show = aca_sysfs_read;
+ aca_attr->attr.name = handle->attr_name;
+ aca_attr->attr.mode = S_IRUGO;
+ sysfs_attr_init(&aca_attr->attr);
+
+ return sysfs_add_file_to_group(&adev->dev->kobj,
+ &aca_attr->attr,
+ "ras");
+}
+
+int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle,
+ const char *name, const struct aca_info *ras_info, void *data)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ int ret;
+
+ if (!amdgpu_aca_is_enabled(adev))
+ return 0;
+
+ ret = add_aca_handle(adev, &aca->mgr, handle, name, ras_info, data);
+ if (ret)
+ return ret;
+
+ return add_aca_sysfs(adev, handle);
+}
+
+static void remove_aca_handle(struct aca_handle *handle)
+{
+ struct aca_handle_manager *mgr = handle->mgr;
+
+ aca_fini_error_cache(handle);
+ list_del(&handle->node);
+ mgr->nr_handles--;
+}
+
+static void remove_aca_sysfs(struct aca_handle *handle)
+{
+ struct amdgpu_device *adev = handle->adev;
+ struct device_attribute *aca_attr = &handle->aca_attr;
+
+ if (adev->dev->kobj.sd)
+ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &aca_attr->attr,
+ "ras");
+}
+
+void amdgpu_aca_remove_handle(struct aca_handle *handle)
+{
+ if (!handle || list_empty(&handle->node))
+ return;
+
+ remove_aca_sysfs(handle);
+ remove_aca_handle(handle);
+}
+
+static int aca_manager_init(struct aca_handle_manager *mgr)
+{
+ INIT_LIST_HEAD(&mgr->list);
+ mgr->nr_handles = 0;
+
+ return 0;
+}
+
+static void aca_manager_fini(struct aca_handle_manager *mgr)
+{
+ struct aca_handle *handle, *tmp;
+
+ if (list_empty(&mgr->list))
+ return;
+
+ list_for_each_entry_safe(handle, tmp, &mgr->list, node)
+ amdgpu_aca_remove_handle(handle);
+}
+
+bool amdgpu_aca_is_enabled(struct amdgpu_device *adev)
+{
+ return (adev->aca.is_enabled ||
+ adev->debug_enable_ras_aca);
+}
+
+int amdgpu_aca_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ int ret;
+
+ atomic_set(&aca->ue_update_flag, 0);
+
+ ret = aca_manager_init(&aca->mgr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void amdgpu_aca_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+
+ aca_manager_fini(&aca->mgr);
+
+ atomic_set(&aca->ue_update_flag, 0);
+}
+
+int amdgpu_aca_reset(struct amdgpu_device *adev)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+
+ atomic_set(&aca->ue_update_flag, 0);
+
+ return 0;
+}
+
+void amdgpu_aca_set_smu_funcs(struct amdgpu_device *adev, const struct aca_smu_funcs *smu_funcs)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+
+ WARN_ON(aca->smu_funcs);
+ aca->smu_funcs = smu_funcs;
+}
+
+int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info)
+{
+ u64 ipid;
+ u32 instidhi, instidlo;
+
+ if (!bank || !info)
+ return -EINVAL;
+
+ ipid = bank->regs[ACA_REG_IDX_IPID];
+ info->hwid = ACA_REG__IPID__HARDWAREID(ipid);
+ info->mcatype = ACA_REG__IPID__MCATYPE(ipid);
+ /*
+	 * Unified DieID Format: SAASS. A:AID, S:Socket.
+	 * Unified DieID[4:4] = InstanceId[0:0]
+	 * Unified DieID[0:3] = InstanceIdHi[0:3]
+ */
+ instidhi = ACA_REG__IPID__INSTANCEIDHI(ipid);
+ instidlo = ACA_REG__IPID__INSTANCEIDLO(ipid);
+ info->die_id = ((instidhi >> 2) & 0x03);
+ info->socket_id = ((instidlo & 0x1) << 2) | (instidhi & 0x03);
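+	/* Example with the shifts above: instidhi = 0xd, instidlo bit 0 = 1 -> die_id = 3, socket_id = 5 */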
+
+ return 0;
+}
+
+static int aca_bank_get_error_code(struct amdgpu_device *adev, struct aca_bank *bank)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
+
+ if (!smu_funcs || !smu_funcs->parse_error_code)
+ return -EOPNOTSUPP;
+
+ return smu_funcs->parse_error_code(adev, bank);
+}
+
+int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size)
+{
+ int i, error_code;
+
+ if (!bank || !err_codes)
+ return -EINVAL;
+
+ error_code = aca_bank_get_error_code(adev, bank);
+ if (error_code < 0)
+ return error_code;
+
+ for (i = 0; i < size; i++) {
+ if (err_codes[i] == error_code)
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en)
+{
+ struct amdgpu_aca *aca = &adev->aca;
+ const struct aca_smu_funcs *smu_funcs = aca->smu_funcs;
+
+ if (!smu_funcs || !smu_funcs->set_debug_mode)
+ return -EOPNOTSUPP;
+
+ return smu_funcs->set_debug_mode(adev, en);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_aca_smu_debug_mode_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ int ret;
+
+ ret = amdgpu_ras_set_aca_debug_mode(adev, val ? true : false);
+ if (ret)
+ return ret;
+
+ dev_info(adev->dev, "amdgpu set smu aca debug mode %s success\n", val ? "on" : "off");
+
+ return 0;
+}
+
+static void aca_dump_entry(struct seq_file *m, struct aca_bank *bank, enum aca_smu_type type, int idx)
+{
+ struct aca_bank_info info;
+ int i, ret;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return;
+
+ seq_printf(m, "aca entry[%d].type: %s\n", idx, type == ACA_SMU_TYPE_UE ? "UE" : "CE");
+ seq_printf(m, "aca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n",
+ idx, info.socket_id, info.die_id, info.hwid, info.mcatype);
+
+ for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
+ seq_printf(m, "aca entry[%d].regs[%d]: 0x%016llx\n", idx, aca_regs[i].reg_idx, bank->regs[aca_regs[i].reg_idx]);
+}
+
+struct aca_dump_context {
+ struct seq_file *m;
+ int idx;
+};
+
+static int handler_aca_bank_dump(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ struct aca_dump_context *ctx = (struct aca_dump_context *)data;
+
+ aca_dump_entry(ctx->m, bank, type, ctx->idx++);
+
+ return handler_aca_log_bank_error(handle, bank, type, NULL);
+}
+
+static int aca_dump_show(struct seq_file *m, enum aca_smu_type type)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct aca_dump_context context = {
+ .m = m,
+ .idx = 0,
+ };
+
+ return aca_banks_update(adev, type, handler_aca_bank_dump, NULL, (void *)&context);
+}
+
+static int aca_dump_ce_show(struct seq_file *m, void *unused)
+{
+ return aca_dump_show(m, ACA_SMU_TYPE_CE);
+}
+
+static int aca_dump_ce_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aca_dump_ce_show, inode->i_private);
+}
+
+static const struct file_operations aca_ce_dump_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = aca_dump_ce_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int aca_dump_ue_show(struct seq_file *m, void *unused)
+{
+ return aca_dump_show(m, ACA_SMU_TYPE_UE);
+}
+
+static int aca_dump_ue_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aca_dump_ue_show, inode->i_private);
+}
+
+static const struct file_operations aca_ue_dump_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = aca_dump_ue_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+DEFINE_DEBUGFS_ATTRIBUTE(aca_debug_mode_fops, NULL, amdgpu_aca_smu_debug_mode_set, "%llu\n");
+#endif
+
+void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
+{
+#if defined(CONFIG_DEBUG_FS)
+ if (!root)
+ return;
+
+ debugfs_create_file("aca_debug_mode", 0200, root, adev, &aca_debug_mode_fops);
+ debugfs_create_file("aca_ue_dump", 0400, root, adev, &aca_ue_dump_debug_fops);
+ debugfs_create_file("aca_ce_dump", 0400, root, adev, &aca_ce_dump_debug_fops);
+#endif
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
new file mode 100644
index 000000000000..38c88897e1ec
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_ACA_H__
+#define __AMDGPU_ACA_H__
+
+#include <linux/list.h>
+
+struct ras_err_data;
+struct ras_query_context;
+
+#define ACA_MAX_REGS_COUNT (16)
+
+#define ACA_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l)
+#define ACA_REG__STATUS__VAL(x) ACA_REG_FIELD(x, 63, 63)
+#define ACA_REG__STATUS__OVERFLOW(x) ACA_REG_FIELD(x, 62, 62)
+#define ACA_REG__STATUS__UC(x) ACA_REG_FIELD(x, 61, 61)
+#define ACA_REG__STATUS__EN(x) ACA_REG_FIELD(x, 60, 60)
+#define ACA_REG__STATUS__MISCV(x) ACA_REG_FIELD(x, 59, 59)
+#define ACA_REG__STATUS__ADDRV(x) ACA_REG_FIELD(x, 58, 58)
+#define ACA_REG__STATUS__PCC(x) ACA_REG_FIELD(x, 57, 57)
+#define ACA_REG__STATUS__ERRCOREIDVAL(x) ACA_REG_FIELD(x, 56, 56)
+#define ACA_REG__STATUS__TCC(x) ACA_REG_FIELD(x, 55, 55)
+#define ACA_REG__STATUS__SYNDV(x) ACA_REG_FIELD(x, 53, 53)
+#define ACA_REG__STATUS__CECC(x) ACA_REG_FIELD(x, 46, 46)
+#define ACA_REG__STATUS__UECC(x) ACA_REG_FIELD(x, 45, 45)
+#define ACA_REG__STATUS__DEFERRED(x) ACA_REG_FIELD(x, 44, 44)
+#define ACA_REG__STATUS__POISON(x) ACA_REG_FIELD(x, 43, 43)
+#define ACA_REG__STATUS__SCRUB(x) ACA_REG_FIELD(x, 40, 40)
+#define ACA_REG__STATUS__ERRCOREID(x) ACA_REG_FIELD(x, 37, 32)
+#define ACA_REG__STATUS__ADDRLSB(x) ACA_REG_FIELD(x, 29, 24)
+#define ACA_REG__STATUS__ERRORCODEEXT(x) ACA_REG_FIELD(x, 21, 16)
+#define ACA_REG__STATUS__ERRORCODE(x) ACA_REG_FIELD(x, 15, 0)
+
+#define ACA_REG__IPID__MCATYPE(x) ACA_REG_FIELD(x, 63, 48)
+#define ACA_REG__IPID__INSTANCEIDHI(x) ACA_REG_FIELD(x, 47, 44)
+#define ACA_REG__IPID__HARDWAREID(x) ACA_REG_FIELD(x, 43, 32)
+#define ACA_REG__IPID__INSTANCEIDLO(x) ACA_REG_FIELD(x, 31, 0)
+
+#define ACA_REG__MISC0__VALID(x) ACA_REG_FIELD(x, 63, 63)
+#define ACA_REG__MISC0__OVRFLW(x) ACA_REG_FIELD(x, 48, 48)
+#define ACA_REG__MISC0__ERRCNT(x) ACA_REG_FIELD(x, 43, 32)
+
+#define ACA_REG__SYND__ERRORINFORMATION(x) ACA_REG_FIELD(x, 17, 0)
+
+/* NOTE: The following codes refer to the SMU header file */
+#define ACA_EXTERROR_CODE_CE 0x3a
+#define ACA_EXTERROR_CODE_FAULT 0x3b
+
+#define ACA_ERROR_UE_MASK BIT_MASK(ACA_ERROR_TYPE_UE)
+#define ACA_ERROR_CE_MASK BIT_MASK(ACA_ERROR_TYPE_CE)
+#define ACA_ERROR_DEFERRED_MASK BIT_MASK(ACA_ERROR_TYPE_DEFERRED)
+
+#define mmSMNAID_AID0_MCA_SMU 0x03b30400 /* SMN AID AID0 */
+#define mmSMNAID_XCD0_MCA_SMU 0x36430400 /* SMN AID XCD0 */
+#define mmSMNAID_XCD1_MCA_SMU 0x38430400 /* SMN AID XCD1 */
+#define mmSMNXCD_XCD0_MCA_SMU 0x40430400 /* SMN XCD XCD0 */
+
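+/* A bank counts as deferred when either the poison or the deferred bit is set in its STATUS register. */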
+#define ACA_BANK_ERR_IS_DEFFERED(bank) \
+ (ACA_REG__STATUS__POISON((bank)->regs[ACA_REG_IDX_STATUS]) || \
+ ACA_REG__STATUS__DEFERRED((bank)->regs[ACA_REG_IDX_STATUS]))
+
+enum aca_reg_idx {
+ ACA_REG_IDX_CTL = 0,
+ ACA_REG_IDX_STATUS = 1,
+ ACA_REG_IDX_ADDR = 2,
+ ACA_REG_IDX_MISC0 = 3,
+ ACA_REG_IDX_CONFIG = 4,
+ ACA_REG_IDX_IPID = 5,
+ ACA_REG_IDX_SYND = 6,
+ ACA_REG_IDX_DESTAT = 8,
+ ACA_REG_IDX_DEADDR = 9,
+ ACA_REG_IDX_CTL_MASK = 10,
+ ACA_REG_IDX_COUNT = 16,
+};
+
+enum aca_hwip_type {
+ ACA_HWIP_TYPE_UNKNOW = -1,
+ ACA_HWIP_TYPE_PSP = 0,
+ ACA_HWIP_TYPE_UMC,
+ ACA_HWIP_TYPE_SMU,
+ ACA_HWIP_TYPE_PCS_XGMI,
+ ACA_HWIP_TYPE_COUNT,
+};
+
+enum aca_error_type {
+ ACA_ERROR_TYPE_INVALID = -1,
+ ACA_ERROR_TYPE_UE = 0,
+ ACA_ERROR_TYPE_CE,
+ ACA_ERROR_TYPE_DEFERRED,
+ ACA_ERROR_TYPE_COUNT
+};
+
+enum aca_smu_type {
+ ACA_SMU_TYPE_INVALID = -1,
+ ACA_SMU_TYPE_UE = 0,
+ ACA_SMU_TYPE_CE,
+ ACA_SMU_TYPE_COUNT,
+};
+
+struct aca_hwip {
+ int hwid;
+ int mcatype;
+};
+
+struct aca_bank {
+ enum aca_error_type aca_err_type;
+ enum aca_smu_type smu_err_type;
+ u64 regs[ACA_MAX_REGS_COUNT];
+};
+
+struct aca_bank_node {
+ struct aca_bank bank;
+ struct list_head node;
+};
+
+struct aca_banks {
+ int nr_banks;
+ struct list_head list;
+};
+
+struct aca_bank_info {
+ int die_id;
+ int socket_id;
+ int hwid;
+ int mcatype;
+};
+
+struct aca_bank_error {
+ struct list_head node;
+ struct aca_bank_info info;
+ u64 count;
+};
+
+struct aca_error {
+ struct list_head list;
+ struct mutex lock;
+ enum aca_error_type type;
+ int nr_errors;
+};
+
+struct aca_handle_manager {
+ struct list_head list;
+ int nr_handles;
+};
+
+struct aca_error_cache {
+ struct aca_error errors[ACA_ERROR_TYPE_COUNT];
+};
+
+struct aca_handle {
+ struct list_head node;
+ enum aca_hwip_type hwip;
+ struct amdgpu_device *adev;
+ struct aca_handle_manager *mgr;
+ struct aca_error_cache error_cache;
+ const struct aca_bank_ops *bank_ops;
+ struct device_attribute aca_attr;
+ char attr_name[64];
+ const char *name;
+ u32 mask;
+ void *data;
+};
+
+struct aca_bank_ops {
+ int (*aca_bank_parser)(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type, void *data);
+ bool (*aca_bank_is_valid)(struct aca_handle *handle, struct aca_bank *bank, enum aca_smu_type type,
+ void *data);
+};
+
+struct aca_smu_funcs {
+ int max_ue_bank_count;
+ int max_ce_bank_count;
+ int (*set_debug_mode)(struct amdgpu_device *adev, bool enable);
+ int (*get_valid_aca_count)(struct amdgpu_device *adev, enum aca_smu_type type, u32 *count);
+ int (*get_valid_aca_bank)(struct amdgpu_device *adev, enum aca_smu_type type, int idx, struct aca_bank *bank);
+ int (*parse_error_code)(struct amdgpu_device *adev, struct aca_bank *bank);
+};
+
+struct amdgpu_aca {
+ struct aca_handle_manager mgr;
+ const struct aca_smu_funcs *smu_funcs;
+ atomic_t ue_update_flag;
+ bool is_enabled;
+};
+
+struct aca_info {
+ enum aca_hwip_type hwip;
+ const struct aca_bank_ops *bank_ops;
+ u32 mask;
+};
+
+int amdgpu_aca_init(struct amdgpu_device *adev);
+void amdgpu_aca_fini(struct amdgpu_device *adev);
+int amdgpu_aca_reset(struct amdgpu_device *adev);
+void amdgpu_aca_set_smu_funcs(struct amdgpu_device *adev, const struct aca_smu_funcs *smu_funcs);
+bool amdgpu_aca_is_enabled(struct amdgpu_device *adev);
+
+int aca_bank_info_decode(struct aca_bank *bank, struct aca_bank_info *info);
+int aca_bank_check_error_codes(struct amdgpu_device *adev, struct aca_bank *bank, int *err_codes, int size);
+
+int amdgpu_aca_add_handle(struct amdgpu_device *adev, struct aca_handle *handle,
+ const char *name, const struct aca_info *aca_info, void *data);
+void amdgpu_aca_remove_handle(struct aca_handle *handle);
+int amdgpu_aca_get_error_data(struct amdgpu_device *adev, struct aca_handle *handle,
+ enum aca_error_type type, struct ras_err_data *err_data,
+ struct ras_query_context *qctx);
+int amdgpu_aca_smu_set_debug_mode(struct amdgpu_device *adev, bool en);
+void amdgpu_aca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root);
+int aca_error_cache_log_bank_error(struct aca_handle *handle, struct aca_bank_info *info,
+ enum aca_error_type type, u64 count);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index 06879d1dcabd..4926996f94da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -24,10 +24,13 @@
*/
#include <linux/irqdomain.h>
+#include <linux/pci.h>
#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <sound/designware_i2s.h>
#include <sound/pcm.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
#include "amdgpu.h"
#include "atom.h"
@@ -35,41 +38,57 @@
#include "acp_gfx_if.h"
-#define ACP_TILE_ON_MASK 0x03
-#define ACP_TILE_OFF_MASK 0x02
-#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
-#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
-
-#define ACP_TILE_P1_MASK 0x3e
-#define ACP_TILE_P2_MASK 0x3d
-#define ACP_TILE_DSP0_MASK 0x3b
-#define ACP_TILE_DSP1_MASK 0x37
-
-#define ACP_TILE_DSP2_MASK 0x2f
-
-#define ACP_DMA_REGS_END 0x146c0
-#define ACP_I2S_PLAY_REGS_START 0x14840
-#define ACP_I2S_PLAY_REGS_END 0x148b4
-#define ACP_I2S_CAP_REGS_START 0x148b8
-#define ACP_I2S_CAP_REGS_END 0x1496c
-
-#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
-#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
-#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
-#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
-
-#define mmACP_PGFSM_RETAIN_REG 0x51c9
-#define mmACP_PGFSM_CONFIG_REG 0x51ca
-#define mmACP_PGFSM_READ_REG_0 0x51cc
-
-#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
-#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
-#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
-#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
-
-#define ACP_TIMEOUT_LOOP 0x000000FF
-#define ACP_DEVS 3
-#define ACP_SRC_ID 162
+#define ST_JADEITE 1
+#define ACP_TILE_ON_MASK 0x03
+#define ACP_TILE_OFF_MASK 0x02
+#define ACP_TILE_ON_RETAIN_REG_MASK 0x1f
+#define ACP_TILE_OFF_RETAIN_REG_MASK 0x20
+
+#define ACP_TILE_P1_MASK 0x3e
+#define ACP_TILE_P2_MASK 0x3d
+#define ACP_TILE_DSP0_MASK 0x3b
+#define ACP_TILE_DSP1_MASK 0x37
+
+#define ACP_TILE_DSP2_MASK 0x2f
+
+#define ACP_DMA_REGS_END 0x146c0
+#define ACP_I2S_PLAY_REGS_START 0x14840
+#define ACP_I2S_PLAY_REGS_END 0x148b4
+#define ACP_I2S_CAP_REGS_START 0x148b8
+#define ACP_I2S_CAP_REGS_END 0x1496c
+
+#define ACP_I2S_COMP1_CAP_REG_OFFSET 0xac
+#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
+#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
+#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
+#define ACP_BT_PLAY_REGS_START 0x14970
+#define ACP_BT_PLAY_REGS_END 0x14a24
+#define ACP_BT_COMP1_REG_OFFSET 0xac
+#define ACP_BT_COMP2_REG_OFFSET 0xa8
+
+#define mmACP_PGFSM_RETAIN_REG 0x51c9
+#define mmACP_PGFSM_CONFIG_REG 0x51ca
+#define mmACP_PGFSM_READ_REG_0 0x51cc
+
+#define mmACP_MEM_SHUT_DOWN_REQ_LO 0x51f8
+#define mmACP_MEM_SHUT_DOWN_REQ_HI 0x51f9
+#define mmACP_MEM_SHUT_DOWN_STS_LO 0x51fa
+#define mmACP_MEM_SHUT_DOWN_STS_HI 0x51fb
+
+#define mmACP_CONTROL 0x5131
+#define mmACP_STATUS 0x5133
+#define mmACP_SOFT_RESET 0x5134
+#define ACP_CONTROL__ClkEn_MASK 0x1
+#define ACP_SOFT_RESET__SoftResetAud_MASK 0x100
+#define ACP_SOFT_RESET__SoftResetAudDone_MASK 0x1000000
+#define ACP_CLOCK_EN_TIME_OUT_VALUE 0x000000FF
+#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF
+
+#define ACP_TIMEOUT_LOOP 0x000000FF
+#define ACP_DEVS 4
+#define ACP_SRC_ID 162
+
+static unsigned long acp_machine_id;
enum {
ACP_TILE_P1 = 0,
@@ -79,9 +98,9 @@ enum {
ACP_TILE_DSP2,
};
-static int acp_sw_init(void *handle)
+static int acp_sw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->acp.parent = adev->dev;
@@ -93,9 +112,9 @@ static int acp_sw_init(void *handle)
return 0;
}
-static int acp_sw_fini(void *handle)
+static int acp_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (adev->acp.cgs_device)
amdgpu_cgs_destroy_device(adev->acp.cgs_device);
@@ -103,312 +122,435 @@ static int acp_sw_fini(void *handle)
return 0;
}
-/* power off a tile/block within ACP */
-static int acp_suspend_tile(void *cgs_dev, int tile)
-{
- u32 val = 0;
- u32 count = 0;
-
- if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
- pr_err("Invalid ACP tile : %d to suspend\n", tile);
- return -1;
- }
-
- val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
- val &= ACP_TILE_ON_MASK;
-
- if (val == 0x0) {
- val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
- val = val | (1 << tile);
- cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
- cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
- 0x500 + tile);
-
- count = ACP_TIMEOUT_LOOP;
- while (true) {
- val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
- + tile);
- val = val & ACP_TILE_ON_MASK;
- if (val == ACP_TILE_OFF_MASK)
- break;
- if (--count == 0) {
- pr_err("Timeout reading ACP PGFSM status\n");
- return -ETIMEDOUT;
- }
- udelay(100);
- }
-
- val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
-
- val |= ACP_TILE_OFF_RETAIN_REG_MASK;
- cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
- }
- return 0;
-}
-
-/* power on a tile/block within ACP */
-static int acp_resume_tile(void *cgs_dev, int tile)
-{
- u32 val = 0;
- u32 count = 0;
-
- if ((tile < ACP_TILE_P1) || (tile > ACP_TILE_DSP2)) {
- pr_err("Invalid ACP tile to resume\n");
- return -1;
- }
-
- val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0 + tile);
- val = val & ACP_TILE_ON_MASK;
-
- if (val != 0x0) {
- cgs_write_register(cgs_dev, mmACP_PGFSM_CONFIG_REG,
- 0x600 + tile);
- count = ACP_TIMEOUT_LOOP;
- while (true) {
- val = cgs_read_register(cgs_dev, mmACP_PGFSM_READ_REG_0
- + tile);
- val = val & ACP_TILE_ON_MASK;
- if (val == 0x0)
- break;
- if (--count == 0) {
- pr_err("Timeout reading ACP PGFSM status\n");
- return -ETIMEDOUT;
- }
- udelay(100);
- }
- val = cgs_read_register(cgs_dev, mmACP_PGFSM_RETAIN_REG);
- if (tile == ACP_TILE_P1)
- val = val & (ACP_TILE_P1_MASK);
- else if (tile == ACP_TILE_P2)
- val = val & (ACP_TILE_P2_MASK);
-
- cgs_write_register(cgs_dev, mmACP_PGFSM_RETAIN_REG, val);
- }
- return 0;
-}
-
struct acp_pm_domain {
- void *cgs_dev;
+ void *adev;
struct generic_pm_domain gpd;
};
static int acp_poweroff(struct generic_pm_domain *genpd)
{
- int i, ret;
struct acp_pm_domain *apd;
+ struct amdgpu_device *adev;
apd = container_of(genpd, struct acp_pm_domain, gpd);
- if (apd != NULL) {
- /* Donot return abruptly if any of power tile fails to suspend.
- * Log it and continue powering off other tile
- */
- for (i = 4; i >= 0 ; i--) {
- ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i);
- if (ret)
- pr_err("ACP tile %d tile suspend failed\n", i);
- }
- }
+ adev = apd->adev;
+ /* call smu to POWER GATE ACP block
+ * smu will
+ * 1. turn off the acp clock
+ * 2. power off the acp tiles
+ * 3. check and enter ulv state
+ */
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
}
static int acp_poweron(struct generic_pm_domain *genpd)
{
- int i, ret;
struct acp_pm_domain *apd;
+ struct amdgpu_device *adev;
apd = container_of(genpd, struct acp_pm_domain, gpd);
- if (apd != NULL) {
- for (i = 0; i < 2; i++) {
- ret = acp_resume_tile(apd->cgs_dev, ACP_TILE_P1 + i);
- if (ret) {
- pr_err("ACP tile %d resume failed\n", i);
- break;
- }
- }
-
- /* Disable DSPs which are not going to be used */
- for (i = 0; i < 3; i++) {
- ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_DSP0 + i);
- /* Continue suspending other DSP, even if one fails */
- if (ret)
- pr_err("ACP DSP %d suspend failed\n", i);
- }
- }
+ adev = apd->adev;
+ /* call smu to UNGATE ACP block
+ * smu will
+ * 1. exit ulv
+ * 2. turn on acp clock
+ * 3. power on acp tiles
+ */
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
-static struct device *get_mfd_cell_dev(const char *device_name, int r)
+static int acp_genpd_add_device(struct device *dev, void *data)
{
- char auto_dev_name[25];
- struct device *dev;
+ struct generic_pm_domain *gpd = data;
+ int ret;
- snprintf(auto_dev_name, sizeof(auto_dev_name),
- "%s.%d.auto", device_name, r);
- dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
- dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
+ ret = pm_genpd_add_device(gpd, dev);
+ if (ret)
+ dev_err(dev, "Failed to add dev to genpd %d\n", ret);
- return dev;
+ return ret;
}
+static int acp_genpd_remove_device(struct device *dev, void *data)
+{
+ int ret;
+
+ ret = pm_genpd_remove_device(dev);
+ if (ret)
+ dev_err(dev, "Failed to remove dev from genpd %d\n", ret);
+
+ /* Continue to remove */
+ return 0;
+}
+
+static int acp_quirk_cb(const struct dmi_system_id *id)
+{
+ acp_machine_id = ST_JADEITE;
+ return 1;
+}
+
+static const struct dmi_system_id acp_quirk_table[] = {
+ {
+ .callback = acp_quirk_cb,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMD"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jadeite"),
+ }
+ },
+ {
+ .callback = acp_quirk_cb,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "IP3 Technology CO.,Ltd."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ASN1D"),
+ },
+ },
+ {
+ .callback = acp_quirk_cb,
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Standard"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ASN10"),
+ },
+ },
+ {}
+};
+
/**
* acp_hw_init - start and test ACP block
*
- * @adev: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int acp_hw_init(void *handle)
+static int acp_hw_init(struct amdgpu_ip_block *ip_block)
{
- int r, i;
- uint64_t acp_base;
- struct device *dev;
- struct i2s_platform_data *i2s_pdata;
-
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- const struct amdgpu_ip_block *ip_block =
- amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_ACP);
+ int r;
+ u64 acp_base;
+ u32 val = 0;
+ u32 count = 0;
+ struct i2s_platform_data *i2s_pdata = NULL;
- if (!ip_block)
- return -EINVAL;
+ struct amdgpu_device *adev = ip_block->adev;
r = amd_acp_hw_init(adev->acp.cgs_device,
ip_block->version->major, ip_block->version->minor);
/* -ENODEV means board uses AZ rather than ACP */
- if (r == -ENODEV)
+ if (r == -ENODEV) {
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
- else if (r)
+ } else if (r) {
return r;
+ }
- r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO,
- 0x5289, 0, &acp_base);
- if (r == -ENODEV)
- return 0;
- else if (r)
- return r;
+ if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289)
+ return -EINVAL;
+ acp_base = adev->rmmio_base;
adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL);
- if (adev->acp.acp_genpd == NULL)
+ if (!adev->acp.acp_genpd)
return -ENOMEM;
adev->acp.acp_genpd->gpd.name = "ACP_AUDIO";
adev->acp.acp_genpd->gpd.power_off = acp_poweroff;
adev->acp.acp_genpd->gpd.power_on = acp_poweron;
-
-
- adev->acp.acp_genpd->cgs_dev = adev->acp.cgs_device;
+ adev->acp.acp_genpd->adev = adev;
pm_genpd_init(&adev->acp.acp_genpd->gpd, NULL, false);
+ dmi_check_system(acp_quirk_table);
+ switch (acp_machine_id) {
+ case ST_JADEITE:
+ {
+ adev->acp.acp_cell = kcalloc(2, sizeof(struct mfd_cell),
+ GFP_KERNEL);
+ if (!adev->acp.acp_cell) {
+ r = -ENOMEM;
+ goto failure;
+ }
- adev->acp.acp_cell = kzalloc(sizeof(struct mfd_cell) * ACP_DEVS,
- GFP_KERNEL);
-
- if (adev->acp.acp_cell == NULL)
- return -ENOMEM;
+ adev->acp.acp_res = kcalloc(3, sizeof(struct resource), GFP_KERNEL);
+ if (!adev->acp.acp_res) {
+ r = -ENOMEM;
+ goto failure;
+ }
- adev->acp.acp_res = kzalloc(sizeof(struct resource) * 4, GFP_KERNEL);
+ i2s_pdata = kcalloc(1, sizeof(struct i2s_platform_data), GFP_KERNEL);
+ if (!i2s_pdata) {
+ r = -ENOMEM;
+ goto failure;
+ }
- if (adev->acp.acp_res == NULL) {
- kfree(adev->acp.acp_cell);
- return -ENOMEM;
+ i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
+ DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+ i2s_pdata[0].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
+ i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
+ i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
+ i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
+
+ adev->acp.acp_res[0].name = "acp2x_dma";
+ adev->acp.acp_res[0].flags = IORESOURCE_MEM;
+ adev->acp.acp_res[0].start = acp_base;
+ adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;
+
+ adev->acp.acp_res[1].name = "acp2x_dw_i2s_play_cap";
+ adev->acp.acp_res[1].flags = IORESOURCE_MEM;
+ adev->acp.acp_res[1].start = acp_base + ACP_I2S_CAP_REGS_START;
+ adev->acp.acp_res[1].end = acp_base + ACP_I2S_CAP_REGS_END;
+
+ adev->acp.acp_res[2].name = "acp2x_dma_irq";
+ adev->acp.acp_res[2].flags = IORESOURCE_IRQ;
+ adev->acp.acp_res[2].start = amdgpu_irq_create_mapping(adev, 162);
+ adev->acp.acp_res[2].end = adev->acp.acp_res[2].start;
+
+ adev->acp.acp_cell[0].name = "acp_audio_dma";
+ adev->acp.acp_cell[0].num_resources = 3;
+ adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
+ adev->acp.acp_cell[0].platform_data = &adev->asic_type;
+ adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
+
+ adev->acp.acp_cell[1].name = "designware-i2s";
+ adev->acp.acp_cell[1].num_resources = 1;
+ adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
+ adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
+ adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
+ r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 2);
+ if (r)
+ goto failure;
+ r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
+ acp_genpd_add_device);
+ if (r)
+ goto failure;
+ break;
}
+ default:
+ adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
+ GFP_KERNEL);
- i2s_pdata = kzalloc(sizeof(struct i2s_platform_data) * 2, GFP_KERNEL);
- if (i2s_pdata == NULL) {
- kfree(adev->acp.acp_res);
- kfree(adev->acp.acp_cell);
- return -ENOMEM;
- }
+ if (!adev->acp.acp_cell) {
+ r = -ENOMEM;
+ goto failure;
+ }
- i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
- i2s_pdata[0].cap = DWC_I2S_PLAY;
- i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
- i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
- i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
+ adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
+ if (!adev->acp.acp_res) {
+ r = -ENOMEM;
+ goto failure;
+ }
- i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
+ i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
+ if (!i2s_pdata) {
+ r = -ENOMEM;
+ goto failure;
+ }
+
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
+ DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+ break;
+ default:
+ i2s_pdata[0].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+ }
+ i2s_pdata[0].cap = DWC_I2S_PLAY;
+ i2s_pdata[0].snd_rates = SNDRV_PCM_RATE_8000_96000;
+ i2s_pdata[0].i2s_reg_comp1 = ACP_I2S_COMP1_PLAY_REG_OFFSET;
+ i2s_pdata[0].i2s_reg_comp2 = ACP_I2S_COMP2_PLAY_REG_OFFSET;
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
+ DW_I2S_QUIRK_COMP_PARAM1 |
+ DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+ break;
+ default:
+ i2s_pdata[1].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET |
DW_I2S_QUIRK_COMP_PARAM1;
- i2s_pdata[1].cap = DWC_I2S_RECORD;
- i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
- i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
- i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
-
- adev->acp.acp_res[0].name = "acp2x_dma";
- adev->acp.acp_res[0].flags = IORESOURCE_MEM;
- adev->acp.acp_res[0].start = acp_base;
- adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;
-
- adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
- adev->acp.acp_res[1].flags = IORESOURCE_MEM;
- adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
- adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;
-
- adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
- adev->acp.acp_res[2].flags = IORESOURCE_MEM;
- adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
- adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
-
- adev->acp.acp_res[3].name = "acp2x_dma_irq";
- adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
- adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162);
- adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;
-
- adev->acp.acp_cell[0].name = "acp_audio_dma";
- adev->acp.acp_cell[0].num_resources = 4;
- adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
-
- adev->acp.acp_cell[1].name = "designware-i2s";
- adev->acp.acp_cell[1].num_resources = 1;
- adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
- adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
- adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
-
- adev->acp.acp_cell[2].name = "designware-i2s";
- adev->acp.acp_cell[2].num_resources = 1;
- adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
- adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
- adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
-
- r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
- ACP_DEVS);
- if (r)
- return r;
+ }
- for (i = 0; i < ACP_DEVS ; i++) {
- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
- r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
- if (r) {
- dev_err(dev, "Failed to add dev to genpd\n");
- return r;
+ i2s_pdata[1].cap = DWC_I2S_RECORD;
+ i2s_pdata[1].snd_rates = SNDRV_PCM_RATE_8000_96000;
+ i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
+ i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
+
+ i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+ break;
+ default:
+ break;
}
+
+ i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
+ i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
+ i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
+ i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
+
+ adev->acp.acp_res[0].name = "acp2x_dma";
+ adev->acp.acp_res[0].flags = IORESOURCE_MEM;
+ adev->acp.acp_res[0].start = acp_base;
+ adev->acp.acp_res[0].end = acp_base + ACP_DMA_REGS_END;
+
+ adev->acp.acp_res[1].name = "acp2x_dw_i2s_play";
+ adev->acp.acp_res[1].flags = IORESOURCE_MEM;
+ adev->acp.acp_res[1].start = acp_base + ACP_I2S_PLAY_REGS_START;
+ adev->acp.acp_res[1].end = acp_base + ACP_I2S_PLAY_REGS_END;
+
+ adev->acp.acp_res[2].name = "acp2x_dw_i2s_cap";
+ adev->acp.acp_res[2].flags = IORESOURCE_MEM;
+ adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
+ adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
+
+ adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
+ adev->acp.acp_res[3].flags = IORESOURCE_MEM;
+ adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
+ adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;
+
+ adev->acp.acp_res[4].name = "acp2x_dma_irq";
+ adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
+ adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
+ adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
+
+ adev->acp.acp_cell[0].name = "acp_audio_dma";
+ adev->acp.acp_cell[0].num_resources = 5;
+ adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
+ adev->acp.acp_cell[0].platform_data = &adev->asic_type;
+ adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
+
+ adev->acp.acp_cell[1].name = "designware-i2s";
+ adev->acp.acp_cell[1].num_resources = 1;
+ adev->acp.acp_cell[1].resources = &adev->acp.acp_res[1];
+ adev->acp.acp_cell[1].platform_data = &i2s_pdata[0];
+ adev->acp.acp_cell[1].pdata_size = sizeof(struct i2s_platform_data);
+
+ adev->acp.acp_cell[2].name = "designware-i2s";
+ adev->acp.acp_cell[2].num_resources = 1;
+ adev->acp.acp_cell[2].resources = &adev->acp.acp_res[2];
+ adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
+ adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
+
+ adev->acp.acp_cell[3].name = "designware-i2s";
+ adev->acp.acp_cell[3].num_resources = 1;
+ adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
+ adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
+ adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
+
+ r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, ACP_DEVS);
+ if (r)
+ goto failure;
+
+ r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
+ acp_genpd_add_device);
+ if (r)
+ goto failure;
}
+ /* Assert Soft reset of ACP */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+
+ val |= ACP_SOFT_RESET__SoftResetAud_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
+
+ count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+ if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
+ (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ r = -ETIMEDOUT;
+ goto failure;
+ }
+ udelay(100);
+ }
+ /* Enable clock to ACP and wait until the clock is enabled */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
+ val = val | ACP_CONTROL__ClkEn_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
+
+ count = ACP_CLOCK_EN_TIME_OUT_VALUE;
+
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
+ if (val & (u32) 0x1)
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ r = -ETIMEDOUT;
+ goto failure;
+ }
+ udelay(100);
+ }
+ /* Deassert the SOFT RESET flags */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+ val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
return 0;
+
+failure:
+ kfree(i2s_pdata);
+ kfree(adev->acp.acp_res);
+ kfree(adev->acp.acp_cell);
+ kfree(adev->acp.acp_genpd);
+ return r;
}
/**
* acp_hw_fini - stop the hardware block
*
- * @adev: amdgpu_device pointer
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
*
*/
-static int acp_hw_fini(void *handle)
+static int acp_hw_fini(struct amdgpu_ip_block *ip_block)
{
- int i, ret;
- struct device *dev;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 val = 0;
+ u32 count = 0;
+ struct amdgpu_device *adev = ip_block->adev;
/* return early if no ACP */
- if (!adev->acp.acp_genpd)
+ if (!adev->acp.acp_genpd) {
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
+ }
- for (i = 0; i < ACP_DEVS ; i++) {
- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
- ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
- /* If removal fails, dont giveup and try rest */
- if (ret)
- dev_err(dev, "remove dev from genpd failed\n");
+ /* Assert Soft reset of ACP */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+
+ val |= ACP_SOFT_RESET__SoftResetAud_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
+
+ count = ACP_SOFT_RESET_DONE_TIME_OUT_VALUE;
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
+ if (ACP_SOFT_RESET__SoftResetAudDone_MASK ==
+ (val & ACP_SOFT_RESET__SoftResetAudDone_MASK))
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
+ }
+ /* Disable ACP clock */
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_CONTROL);
+ val &= ~ACP_CONTROL__ClkEn_MASK;
+ cgs_write_register(adev->acp.cgs_device, mmACP_CONTROL, val);
+
+ count = ACP_CLOCK_EN_TIME_OUT_VALUE;
+
+ while (true) {
+ val = cgs_read_register(adev->acp.cgs_device, mmACP_STATUS);
+ if (val & (u32) 0x1)
+ break;
+ if (--count == 0) {
+ dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
+ return -ETIMEDOUT;
+ }
+ udelay(100);
}
+ device_for_each_child(adev->acp.parent, NULL,
+ acp_genpd_remove_device);
+
mfd_remove_devices(adev->acp.parent);
kfree(adev->acp.acp_res);
kfree(adev->acp.acp_genpd);
@@ -417,52 +559,50 @@ static int acp_hw_fini(void *handle)
return 0;
}
-static int acp_suspend(void *handle)
+static int acp_suspend(struct amdgpu_ip_block *ip_block)
{
- return 0;
-}
+ struct amdgpu_device *adev = ip_block->adev;
-static int acp_resume(void *handle)
-{
+ /* power up on suspend */
+ if (!adev->acp.acp_cell)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, false, 0);
return 0;
}
-static int acp_early_init(void *handle)
+static int acp_resume(struct amdgpu_ip_block *ip_block)
{
- return 0;
-}
-
-static bool acp_is_idle(void *handle)
-{
- return true;
-}
+ struct amdgpu_device *adev = ip_block->adev;
-static int acp_wait_for_idle(void *handle)
-{
+ /* power down again on resume */
+ if (!adev->acp.acp_cell)
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, true, 0);
return 0;
}
-static int acp_soft_reset(void *handle)
+static bool acp_is_idle(struct amdgpu_ip_block *ip_block)
{
- return 0;
+ return true;
}
-static int acp_set_clockgating_state(void *handle,
+static int acp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int acp_set_powergating_state(void *handle,
+static int acp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
+ struct amdgpu_device *adev = ip_block->adev;
+ bool enable = (state == AMD_PG_STATE_GATE);
+
+ amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ACP, enable, 0);
+
return 0;
}
static const struct amd_ip_funcs acp_ip_funcs = {
.name = "acp_ip",
- .early_init = acp_early_init,
- .late_init = NULL,
.sw_init = acp_sw_init,
.sw_fini = acp_sw_fini,
.hw_init = acp_hw_init,
@@ -470,14 +610,11 @@ static const struct amd_ip_funcs acp_ip_funcs = {
.suspend = acp_suspend,
.resume = acp_resume,
.is_idle = acp_is_idle,
- .wait_for_idle = acp_wait_for_idle,
- .soft_reset = acp_soft_reset,
.set_clockgating_state = acp_set_clockgating_state,
.set_powergating_state = acp_set_powergating_state,
};
-const struct amdgpu_ip_block_version acp_ip_block =
-{
+const struct amdgpu_ip_block_version acp_ip_block = {
.type = AMD_IP_BLOCK_TYPE_ACP,
.major = 2,
.minor = 2,
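
Across the amdgpu_acp.c changes above, every amd_ip_funcs callback moves from the old void *handle prototype to one that receives the IP block directly, and the device is reached through ip_block->adev. A minimal sketch of the converted shape is shown below; example_hw_init is a hypothetical hook name, not taken from the patch.

/* Hypothetical hook illustrating the converted prototype -- not in the patch. */
static int example_hw_init(struct amdgpu_ip_block *ip_block)
{
	/* Previously: struct amdgpu_device *adev = (struct amdgpu_device *)handle; */
	struct amdgpu_device *adev = ip_block->adev;

	dev_dbg(adev->dev, "hw_init for IP block v%u.%u\n",
		ip_block->version->major, ip_block->version->minor);
	return 0;
}
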
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index ef79551b4cb7..6c62e27b9800 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2012 Advanced Micro Devices, Inc.
*
@@ -23,33 +24,130 @@
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <linux/backlight.h>
#include <linux/slab.h>
+#include <linux/xarray.h>
#include <linux/power_supply.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include <acpi/video.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+#include <acpi/actbl.h>
+
#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_display.h"
#include "amd_acpi.h"
#include "atom.h"
-extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
+/* Declare GUID for AMD _DSM method for XCCs */
+static const guid_t amd_xcc_dsm_guid = GUID_INIT(0x8267f5d5, 0xa556, 0x44f2,
+ 0xb8, 0xb4, 0x45, 0x56, 0x2e,
+ 0x8c, 0x5b, 0xec);
+
+#define AMD_XCC_HID_START 3000
+#define AMD_XCC_DSM_GET_NUM_FUNCS 0
+#define AMD_XCC_DSM_GET_SUPP_MODE 1
+#define AMD_XCC_DSM_GET_XCP_MODE 2
+#define AMD_XCC_DSM_GET_VF_XCC_MAPPING 4
+#define AMD_XCC_DSM_GET_TMR_INFO 5
+#define AMD_XCC_DSM_NUM_FUNCS 5
+
+#define AMD_XCC_MAX_HID 24
+
+struct xarray numa_info_xa;
+
+/* Encapsulates the XCD acpi object information */
+struct amdgpu_acpi_xcc_info {
+ struct list_head list;
+ struct amdgpu_numa_info *numa_info;
+ uint8_t xcp_node;
+ uint8_t phy_id;
+ acpi_handle handle;
+};
+
+struct amdgpu_acpi_dev_info {
+ struct list_head list;
+ struct list_head xcc_list;
+ uint32_t sbdf;
+ uint16_t supp_xcp_mode;
+ uint16_t xcp_mode;
+ uint16_t mem_mode;
+ uint64_t tmr_base;
+ uint64_t tmr_size;
+};
+
+struct list_head amdgpu_acpi_dev_list;
+
+struct amdgpu_atif_notification_cfg {
+ bool enabled;
+ int command_code;
+};
+
+struct amdgpu_atif_notifications {
+ bool thermal_state;
+ bool forced_power_state;
+ bool system_power_state;
+ bool brightness_change;
+ bool dgpu_display_event;
+ bool gpu_package_power_limit;
+};
+
+struct amdgpu_atif_functions {
+ bool system_params;
+ bool sbios_requests;
+ bool temperature_change;
+ bool query_backlight_transfer_characteristics;
+ bool ready_to_undock;
+ bool external_gpu_information;
+};
+
+struct amdgpu_atif {
+ acpi_handle handle;
+
+ struct amdgpu_atif_notifications notifications;
+ struct amdgpu_atif_functions functions;
+ struct amdgpu_atif_notification_cfg notification_cfg;
+ struct backlight_device *bd;
+ struct amdgpu_dm_backlight_caps backlight_caps;
+};
+
+struct amdgpu_atcs_functions {
+ bool get_ext_state;
+ bool pcie_perf_req;
+ bool pcie_dev_rdy;
+ bool pcie_bus_width;
+ bool power_shift_control;
+};
+
+struct amdgpu_atcs {
+ acpi_handle handle;
+
+ struct amdgpu_atcs_functions functions;
+};
+
+static struct amdgpu_acpi_priv {
+ struct amdgpu_atif atif;
+ struct amdgpu_atcs atcs;
+} amdgpu_acpi_priv;
+
/* Call the ATIF method
*/
/**
* amdgpu_atif_call - call an ATIF method
*
- * @handle: acpi handle
+ * @atif: atif structure
* @function: the ATIF function to execute
* @params: ATIF function params
*
* Executes the requested ATIF function (all asics).
* Returns a pointer to the acpi output buffer.
*/
-static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
- struct acpi_buffer *params)
+static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
+ int function,
+ struct acpi_buffer *params)
{
acpi_status status;
+ union acpi_object *obj;
union acpi_object atif_arg_elements[2];
struct acpi_object_list atif_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
@@ -70,17 +168,26 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
atif_arg_elements[1].integer.value = 0;
}
- status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+ status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
+ &buffer);
+ obj = (union acpi_object *)buffer.pointer;
- /* Fail only if calling the method fails and ATIF is supported */
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ /* Fail if calling the method fails */
+ if (ACPI_FAILURE(status)) {
DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
acpi_format_exception(status));
- kfree(buffer.pointer);
+ kfree(obj);
return NULL;
}
- return buffer.pointer;
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ DRM_DEBUG_DRIVER("bad object returned from ATIF: %d\n",
+ obj->type);
+ kfree(obj);
+ return NULL;
+ }
+
+ return obj;
}
/**
@@ -95,15 +202,12 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
*/
static void amdgpu_atif_parse_notification(struct amdgpu_atif_notifications *n, u32 mask)
{
- n->display_switch = mask & ATIF_DISPLAY_SWITCH_REQUEST_SUPPORTED;
- n->expansion_mode_change = mask & ATIF_EXPANSION_MODE_CHANGE_REQUEST_SUPPORTED;
n->thermal_state = mask & ATIF_THERMAL_STATE_CHANGE_REQUEST_SUPPORTED;
n->forced_power_state = mask & ATIF_FORCED_POWER_STATE_CHANGE_REQUEST_SUPPORTED;
n->system_power_state = mask & ATIF_SYSTEM_POWER_SOURCE_CHANGE_REQUEST_SUPPORTED;
- n->display_conf_change = mask & ATIF_DISPLAY_CONF_CHANGE_REQUEST_SUPPORTED;
- n->px_gfx_switch = mask & ATIF_PX_GFX_SWITCH_REQUEST_SUPPORTED;
n->brightness_change = mask & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST_SUPPORTED;
n->dgpu_display_event = mask & ATIF_DGPU_DISPLAY_EVENT_SUPPORTED;
+ n->gpu_package_power_limit = mask & ATIF_GPU_PACKAGE_POWER_LIMIT_REQUEST_SUPPORTED;
}
/**
@@ -120,20 +224,16 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
{
f->system_params = mask & ATIF_GET_SYSTEM_PARAMETERS_SUPPORTED;
f->sbios_requests = mask & ATIF_GET_SYSTEM_BIOS_REQUESTS_SUPPORTED;
- f->select_active_disp = mask & ATIF_SELECT_ACTIVE_DISPLAYS_SUPPORTED;
- f->lid_state = mask & ATIF_GET_LID_STATE_SUPPORTED;
- f->get_tv_standard = mask & ATIF_GET_TV_STANDARD_FROM_CMOS_SUPPORTED;
- f->set_tv_standard = mask & ATIF_SET_TV_STANDARD_IN_CMOS_SUPPORTED;
- f->get_panel_expansion_mode = mask & ATIF_GET_PANEL_EXPANSION_MODE_FROM_CMOS_SUPPORTED;
- f->set_panel_expansion_mode = mask & ATIF_SET_PANEL_EXPANSION_MODE_IN_CMOS_SUPPORTED;
f->temperature_change = mask & ATIF_TEMPERATURE_CHANGE_NOTIFICATION_SUPPORTED;
- f->graphics_device_types = mask & ATIF_GET_GRAPHICS_DEVICE_TYPES_SUPPORTED;
+ f->query_backlight_transfer_characteristics =
+ mask & ATIF_QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS_SUPPORTED;
+ f->ready_to_undock = mask & ATIF_READY_TO_UNDOCK_NOTIFICATION_SUPPORTED;
+ f->external_gpu_information = mask & ATIF_GET_EXTERNAL_GPU_INFORMATION_SUPPORTED;
}
/**
* amdgpu_atif_verify_interface - verify ATIF
*
- * @handle: acpi handle
* @atif: amdgpu atif struct
*
* Execute the ATIF_FUNCTION_VERIFY_INTERFACE ATIF function
@@ -141,15 +241,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
* (all asics).
* returns 0 on success, error on failure.
*/
-static int amdgpu_atif_verify_interface(acpi_handle handle,
- struct amdgpu_atif *atif)
+static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif)
{
union acpi_object *info;
struct atif_verify_interface output;
size_t size;
int err = 0;
- info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+ info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
if (!info)
return -EIO;
@@ -179,8 +278,7 @@ out:
/**
* amdgpu_atif_get_notification_params - determine notify configuration
*
- * @handle: acpi handle
- * @n: atif notification configuration struct
+ * @atif: acpi handle
*
* Execute the ATIF_FUNCTION_GET_SYSTEM_PARAMETERS ATIF function
* to determine if a notifier is used and if so which one
@@ -188,15 +286,16 @@ out:
* where n is specified in the result if a notifier is used.
* Returns 0 on success, error on failure.
*/
-static int amdgpu_atif_get_notification_params(acpi_handle handle,
- struct amdgpu_atif_notification_cfg *n)
+static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif)
{
union acpi_object *info;
+ struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg;
struct atif_system_params params;
size_t size;
int err = 0;
- info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+ info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS,
+ NULL);
if (!info) {
err = -EIO;
goto out;
@@ -240,9 +339,74 @@ out:
}
/**
+ * amdgpu_atif_query_backlight_caps - get min and max backlight input signal
+ *
+ * @atif: acpi handle
+ *
+ * Execute the QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS ATIF function
+ * to determine the acceptable range of backlight values
+ *
+ * Backlight_caps.caps_valid will be set to true if the query is successful
+ *
+ * The input signals are in range 0-255
+ *
+ * This function assumes the display with backlight is the first LCD
+ *
+ * Returns 0 on success, error on failure.
+ */
+static int amdgpu_atif_query_backlight_caps(struct amdgpu_atif *atif)
+{
+ union acpi_object *info;
+ struct atif_qbtc_output characteristics;
+ struct atif_qbtc_arguments arguments;
+ struct acpi_buffer params;
+ size_t size;
+ int err = 0;
+
+ arguments.size = sizeof(arguments);
+ arguments.requested_display = ATIF_QBTC_REQUEST_LCD1;
+
+ params.length = sizeof(arguments);
+ params.pointer = (void *)&arguments;
+
+ info = amdgpu_atif_call(atif,
+ ATIF_FUNCTION_QUERY_BRIGHTNESS_TRANSFER_CHARACTERISTICS,
+ &params);
+ if (!info) {
+ err = -EIO;
+ goto out;
+ }
+
+ size = *(u16 *) info->buffer.pointer;
+ if (size < 10) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ memset(&characteristics, 0, sizeof(characteristics));
+ size = min(sizeof(characteristics), size);
+ memcpy(&characteristics, info->buffer.pointer, size);
+
+ atif->backlight_caps.caps_valid = true;
+ atif->backlight_caps.min_input_signal =
+ characteristics.min_input_signal;
+ atif->backlight_caps.max_input_signal =
+ characteristics.max_input_signal;
+ atif->backlight_caps.ac_level = characteristics.ac_level;
+ atif->backlight_caps.dc_level = characteristics.dc_level;
+ atif->backlight_caps.data_points = characteristics.number_of_points;
+ memcpy(atif->backlight_caps.luminance_data,
+ characteristics.data_points,
+ sizeof(atif->backlight_caps.luminance_data));
+out:
+ kfree(info);
+ return err;
+}
+
+/**
* amdgpu_atif_get_sbios_requests - get requested sbios event
*
- * @handle: acpi handle
+ * @atif: acpi handle
* @req: atif sbios request struct
*
* Execute the ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS ATIF function
@@ -250,14 +414,15 @@ out:
* (all asics).
* Returns 0 on success, error on failure.
*/
-static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
- struct atif_sbios_requests *req)
+static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif,
+ struct atif_sbios_requests *req)
{
union acpi_object *info;
size_t size;
int count = 0;
- info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+ info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS,
+ NULL);
if (!info)
return -EIO;
@@ -287,14 +452,14 @@ out:
*
* Checks the acpi event and if it matches an atif event,
* handles it.
- * Returns NOTIFY code
+ *
+ * Returns:
+ * NOTIFY_BAD or NOTIFY_DONE, depending on the event.
*/
-int amdgpu_atif_handler(struct amdgpu_device *adev,
- struct acpi_bus_event *event)
+static int amdgpu_atif_handler(struct amdgpu_device *adev,
+ struct acpi_bus_event *event)
{
- struct amdgpu_atif *atif = &adev->atif;
- struct atif_sbios_requests req;
- acpi_handle handle;
+ struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
int count;
DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -303,48 +468,51 @@ int amdgpu_atif_handler(struct amdgpu_device *adev,
if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
return NOTIFY_DONE;
+ /* Is this actually our event? */
if (!atif->notification_cfg.enabled ||
- event->type != atif->notification_cfg.command_code)
- /* Not our event */
- return NOTIFY_DONE;
-
- /* Check pending SBIOS requests */
- handle = ACPI_HANDLE(&adev->pdev->dev);
- count = amdgpu_atif_get_sbios_requests(handle, &req);
-
- if (count <= 0)
- return NOTIFY_DONE;
-
- DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+ event->type != atif->notification_cfg.command_code) {
+ /* These events will generate keypresses otherwise */
+ if (event->type == ACPI_VIDEO_NOTIFY_PROBE)
+ return NOTIFY_BAD;
+ else
+ return NOTIFY_DONE;
+ }
- if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
- struct amdgpu_encoder *enc = atif->encoder_for_bl;
+ if (atif->functions.sbios_requests) {
+ struct atif_sbios_requests req;
- if (enc) {
- struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+ /* Check pending SBIOS requests */
+ count = amdgpu_atif_get_sbios_requests(atif, &req);
- DRM_DEBUG_DRIVER("Changing brightness to %d\n",
- req.backlight_level);
+ if (count <= 0)
+ return NOTIFY_BAD;
- amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
+ DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
- backlight_force_update(dig->bl_dev,
- BACKLIGHT_UPDATE_HOTKEY);
-#endif
+ if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
+ if (atif->bd) {
+ DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+ req.backlight_level);
+ /*
+ * XXX backlight_device_set_brightness() is
+ * hardwired to post BACKLIGHT_UPDATE_SYSFS.
+ * It probably should accept 'reason' parameter.
+ */
+ backlight_device_set_brightness(atif->bd, req.backlight_level);
+ }
}
- }
- if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
- if ((adev->flags & AMD_IS_PX) &&
- amdgpu_atpx_dgpu_req_power_for_displays()) {
- pm_runtime_get_sync(adev->ddev->dev);
- /* Just fire off a uevent and let userspace tell us what to do */
- drm_helper_hpd_irq_event(adev->ddev);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+ if (adev->flags & AMD_IS_PX) {
+ pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(adev_to_drm(adev));
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ }
}
+ /* TODO: check other events */
}
- /* TODO: check other events */
/* We've handled the event, stop the notifier chain. The ACPI interface
* overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
@@ -359,14 +527,15 @@ int amdgpu_atif_handler(struct amdgpu_device *adev,
/**
* amdgpu_atcs_call - call an ATCS method
*
- * @handle: acpi handle
+ * @atcs: atcs structure
* @function: the ATCS function to execute
* @params: ATCS function params
*
* Executes the requested ATCS function (all asics).
* Returns a pointer to the acpi output buffer.
*/
-static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function,
+static union acpi_object *amdgpu_atcs_call(struct amdgpu_atcs *atcs,
+ int function,
struct acpi_buffer *params)
{
acpi_status status;
@@ -390,7 +559,7 @@ static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function,
atcs_arg_elements[1].integer.value = 0;
}
- status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);
+ status = acpi_evaluate_object(atcs->handle, NULL, &atcs_arg, &buffer);
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -419,12 +588,12 @@ static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mas
f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
+ f->power_shift_control = mask & ATCS_SET_POWER_SHIFT_CONTROL_SUPPORTED;
}
/**
* amdgpu_atcs_verify_interface - verify ATCS
*
- * @handle: acpi handle
* @atcs: amdgpu atcs struct
*
* Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
@@ -432,15 +601,14 @@ static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mas
* (all asics).
* returns 0 on success, error on failure.
*/
-static int amdgpu_atcs_verify_interface(acpi_handle handle,
- struct amdgpu_atcs *atcs)
+static int amdgpu_atcs_verify_interface(struct amdgpu_atcs *atcs)
{
union acpi_object *info;
struct atcs_verify_interface output;
size_t size;
int err = 0;
- info = amdgpu_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
+ info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
if (!info)
return -EIO;
@@ -477,7 +645,7 @@ out:
*/
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev)
{
- struct amdgpu_atcs *atcs = &adev->atcs;
+ struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy)
return true;
@@ -486,6 +654,18 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
}
/**
+ * amdgpu_acpi_is_power_shift_control_supported
+ *
+ * Check if the ATCS power shift control method
+ * is supported.
+ * returns true if supported, false if not.
+ */
+bool amdgpu_acpi_is_power_shift_control_supported(void)
+{
+ return amdgpu_acpi_priv.atcs.functions.power_shift_control;
+}
+
+/**
* amdgpu_acpi_pcie_notify_device_ready
*
* @adev: amdgpu_device pointer
@@ -496,19 +676,13 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
*/
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev)
{
- acpi_handle handle;
union acpi_object *info;
- struct amdgpu_atcs *atcs = &adev->atcs;
-
- /* Get the device handle */
- handle = ACPI_HANDLE(&adev->pdev->dev);
- if (!handle)
- return -EINVAL;
+ struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
if (!atcs->functions.pcie_dev_rdy)
return -EINVAL;
- info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
+ info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
if (!info)
return -EIO;
@@ -531,18 +705,15 @@ int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev)
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
u8 perf_req, bool advertise)
{
- acpi_handle handle;
union acpi_object *info;
- struct amdgpu_atcs *atcs = &adev->atcs;
+ struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
struct atcs_pref_req_input atcs_input;
struct atcs_pref_req_output atcs_output;
struct acpi_buffer params;
size_t size;
u32 retry = 3;
- /* Get the device handle */
- handle = ACPI_HANDLE(&adev->pdev->dev);
- if (!handle)
+ if (amdgpu_acpi_pcie_notify_device_ready(adev))
return -EINVAL;
if (!atcs->functions.pcie_perf_req)
@@ -550,7 +721,7 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
atcs_input.size = sizeof(struct atcs_pref_req_input);
/* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
- atcs_input.client_id = adev->pdev->devfn | (adev->pdev->bus->number << 8);
+ atcs_input.client_id = pci_dev_id(adev->pdev);
atcs_input.valid_flags_mask = ATCS_VALID_FLAGS_MASK;
atcs_input.flags = ATCS_WAIT_FOR_COMPLETION;
if (advertise)
@@ -562,7 +733,7 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
params.pointer = &atcs_input;
while (retry--) {
- info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
+ info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
if (!info)
return -EIO;
@@ -596,6 +767,438 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
}
/**
+ * amdgpu_acpi_power_shift_control
+ *
+ * @adev: amdgpu_device pointer
+ * @dev_state: device acpi state
+ * @drv_state: driver state
+ *
+ * Executes the POWER_SHIFT_CONTROL method to
+ * communicate current dGPU device state and
+ * driver state to APU/SBIOS.
+ * returns 0 on success, error on failure.
+ */
+int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
+ u8 dev_state, bool drv_state)
+{
+ union acpi_object *info;
+ struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
+ struct atcs_pwr_shift_input atcs_input;
+ struct acpi_buffer params;
+
+ if (!amdgpu_acpi_is_power_shift_control_supported())
+ return -EINVAL;
+
+ atcs_input.size = sizeof(struct atcs_pwr_shift_input);
+ /* dGPU id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
+ atcs_input.dgpu_id = pci_dev_id(adev->pdev);
+ atcs_input.dev_acpi_state = dev_state;
+ atcs_input.drv_state = drv_state;
+
+ params.length = sizeof(struct atcs_pwr_shift_input);
+ params.pointer = &atcs_input;
+
+ info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_POWER_SHIFT_CONTROL, &params);
+ if (!info) {
+ DRM_ERROR("ATCS PSC update failed\n");
+ return -EIO;
+ }
+
+ kfree(info);
+ return 0;
+}
+
+/**
+ * amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS
+ *
+ * @adev: amdgpu device pointer
+ * @ss_state: current smart shift event
+ *
+ * returns 0 on success,
+ * otherwise return error number.
+ */
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+ enum amdgpu_ss ss_state)
+{
+ int r;
+
+ if (!amdgpu_device_supports_smart_shift(adev))
+ return 0;
+
+ switch (ss_state) {
+ /* SBIOS trigger “stop”, “enable” and “start” at D0, Driver Operational.
+ * SBIOS trigger “stop” at D3, Driver Not Operational.
+ * SBIOS trigger “stop” and “disable” at D0, Driver NOT operational.
+ */
+ case AMDGPU_SS_DRV_LOAD:
+ r = amdgpu_acpi_power_shift_control(adev,
+ AMDGPU_ATCS_PSC_DEV_STATE_D0,
+ AMDGPU_ATCS_PSC_DRV_STATE_OPR);
+ break;
+ case AMDGPU_SS_DEV_D0:
+ r = amdgpu_acpi_power_shift_control(adev,
+ AMDGPU_ATCS_PSC_DEV_STATE_D0,
+ AMDGPU_ATCS_PSC_DRV_STATE_OPR);
+ break;
+ case AMDGPU_SS_DEV_D3:
+ r = amdgpu_acpi_power_shift_control(adev,
+ AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT,
+ AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR);
+ break;
+ case AMDGPU_SS_DRV_UNLOAD:
+ r = amdgpu_acpi_power_shift_control(adev,
+ AMDGPU_ATCS_PSC_DEV_STATE_D0,
+ AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return r;
+}
+
+#ifdef CONFIG_ACPI_NUMA
+static inline uint64_t amdgpu_acpi_get_numa_size(int nid)
+{
+ /* This is directly using si_meminfo_node implementation as the
+ * function is not exported.
+ */
+ int zone_type;
+ uint64_t managed_pages = 0;
+
+ pg_data_t *pgdat = NODE_DATA(nid);
+
+ for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
+ managed_pages +=
+ zone_managed_pages(&pgdat->node_zones[zone_type]);
+ return managed_pages * PAGE_SIZE;
+}
+
+static struct amdgpu_numa_info *amdgpu_acpi_get_numa_info(uint32_t pxm)
+{
+ struct amdgpu_numa_info *numa_info;
+ int nid;
+
+ numa_info = xa_load(&numa_info_xa, pxm);
+
+ if (!numa_info) {
+ struct sysinfo info;
+
+ numa_info = kzalloc(sizeof(*numa_info), GFP_KERNEL);
+ if (!numa_info)
+ return NULL;
+
+ nid = pxm_to_node(pxm);
+ numa_info->pxm = pxm;
+ numa_info->nid = nid;
+
+ if (numa_info->nid == NUMA_NO_NODE) {
+ si_meminfo(&info);
+ numa_info->size = info.totalram * info.mem_unit;
+ } else {
+ numa_info->size = amdgpu_acpi_get_numa_size(nid);
+ }
+ xa_store(&numa_info_xa, numa_info->pxm, numa_info, GFP_KERNEL);
+ }
+
+ return numa_info;
+}
+#endif
+
+/**
+ * amdgpu_acpi_get_node_id - obtain the NUMA node id for corresponding amdgpu
+ * acpi device handle
+ *
+ * @handle: acpi handle
+ * @numa_info: amdgpu_numa_info structure holding numa information
+ *
+ * Queries the ACPI interface to fetch the corresponding NUMA Node ID for a
+ * given amdgpu acpi device.
+ *
+ * Returns ACPI STATUS OK with Node ID on success or the corresponding failure reason
+ */
+static acpi_status amdgpu_acpi_get_node_id(acpi_handle handle,
+ struct amdgpu_numa_info **numa_info)
+{
+#ifdef CONFIG_ACPI_NUMA
+ u64 pxm;
+ acpi_status status;
+
+ if (!numa_info)
+ return_ACPI_STATUS(AE_ERROR);
+
+ status = acpi_evaluate_integer(handle, "_PXM", NULL, &pxm);
+
+ if (ACPI_FAILURE(status))
+ return status;
+
+ *numa_info = amdgpu_acpi_get_numa_info(pxm);
+
+ if (!*numa_info)
+ return_ACPI_STATUS(AE_ERROR);
+
+ return_ACPI_STATUS(AE_OK);
+#else
+ return_ACPI_STATUS(AE_NOT_EXIST);
+#endif
+}
+
+static struct amdgpu_acpi_dev_info *amdgpu_acpi_get_dev(u32 sbdf)
+{
+ struct amdgpu_acpi_dev_info *acpi_dev;
+
+ if (list_empty(&amdgpu_acpi_dev_list))
+ return NULL;
+
+ list_for_each_entry(acpi_dev, &amdgpu_acpi_dev_list, list)
+ if (acpi_dev->sbdf == sbdf)
+ return acpi_dev;
+
+ return NULL;
+}
+
+static int amdgpu_acpi_dev_init(struct amdgpu_acpi_dev_info **dev_info,
+ struct amdgpu_acpi_xcc_info *xcc_info, u32 sbdf)
+{
+ struct amdgpu_acpi_dev_info *tmp;
+ union acpi_object *obj;
+ int ret = -ENOENT;
+
+ *dev_info = NULL;
+ tmp = kzalloc(sizeof(struct amdgpu_acpi_dev_info), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&tmp->xcc_list);
+ INIT_LIST_HEAD(&tmp->list);
+ tmp->sbdf = sbdf;
+
+ obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
+ AMD_XCC_DSM_GET_SUPP_MODE, NULL,
+ ACPI_TYPE_INTEGER);
+
+ if (!obj) {
+ acpi_handle_debug(xcc_info->handle,
+ "_DSM function %d evaluation failed",
+ AMD_XCC_DSM_GET_SUPP_MODE);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ tmp->supp_xcp_mode = obj->integer.value & 0xFFFF;
+ ACPI_FREE(obj);
+
+ obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
+ AMD_XCC_DSM_GET_XCP_MODE, NULL,
+ ACPI_TYPE_INTEGER);
+
+ if (!obj) {
+ acpi_handle_debug(xcc_info->handle,
+ "_DSM function %d evaluation failed",
+ AMD_XCC_DSM_GET_XCP_MODE);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ tmp->xcp_mode = obj->integer.value & 0xFFFF;
+ tmp->mem_mode = (obj->integer.value >> 32) & 0xFFFF;
+ ACPI_FREE(obj);
+
+ /* Evaluate DSMs and fill XCC information */
+ obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
+ AMD_XCC_DSM_GET_TMR_INFO, NULL,
+ ACPI_TYPE_PACKAGE);
+
+ if (!obj || obj->package.count < 2) {
+ acpi_handle_debug(xcc_info->handle,
+ "_DSM function %d evaluation failed",
+ AMD_XCC_DSM_GET_TMR_INFO);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ tmp->tmr_base = obj->package.elements[0].integer.value;
+ tmp->tmr_size = obj->package.elements[1].integer.value;
+ ACPI_FREE(obj);
+
+ DRM_DEBUG_DRIVER(
+ "New dev(%x): Supported xcp mode: %x curr xcp_mode : %x mem mode : %x, tmr base: %llx tmr size: %llx ",
+ tmp->sbdf, tmp->supp_xcp_mode, tmp->xcp_mode, tmp->mem_mode,
+ tmp->tmr_base, tmp->tmr_size);
+ list_add_tail(&tmp->list, &amdgpu_acpi_dev_list);
+ *dev_info = tmp;
+
+ return 0;
+
+out:
+ if (obj)
+ ACPI_FREE(obj);
+ kfree(tmp);
+
+ return ret;
+}
+
+static int amdgpu_acpi_get_xcc_info(struct amdgpu_acpi_xcc_info *xcc_info,
+ u32 *sbdf)
+{
+ union acpi_object *obj;
+ acpi_status status;
+ int ret = -ENOENT;
+
+ obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
+ AMD_XCC_DSM_GET_NUM_FUNCS, NULL,
+ ACPI_TYPE_INTEGER);
+
+ if (!obj || obj->integer.value != AMD_XCC_DSM_NUM_FUNCS)
+ goto out;
+ ACPI_FREE(obj);
+
+ /* Evaluate DSMs and fill XCC information */
+ obj = acpi_evaluate_dsm_typed(xcc_info->handle, &amd_xcc_dsm_guid, 0,
+ AMD_XCC_DSM_GET_VF_XCC_MAPPING, NULL,
+ ACPI_TYPE_INTEGER);
+
+ if (!obj) {
+ acpi_handle_debug(xcc_info->handle,
+ "_DSM function %d evaluation failed",
+ AMD_XCC_DSM_GET_VF_XCC_MAPPING);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* PF xcc id [39:32] */
+ xcc_info->phy_id = (obj->integer.value >> 32) & 0xFF;
+ /* xcp node of this xcc [47:40] */
+ xcc_info->xcp_node = (obj->integer.value >> 40) & 0xFF;
+ /* PF domain of this xcc [31:16] */
+ *sbdf = (obj->integer.value) & 0xFFFF0000;
+ /* PF bus/dev/fn of this xcc [63:48] */
+ *sbdf |= (obj->integer.value >> 48) & 0xFFFF;
+ ACPI_FREE(obj);
+ obj = NULL;
+
+ status =
+ amdgpu_acpi_get_node_id(xcc_info->handle, &xcc_info->numa_info);
+
+ /* TODO: check if this check is required */
+ if (ACPI_SUCCESS(status))
+ ret = 0;
+out:
+ if (obj)
+ ACPI_FREE(obj);
+
+ return ret;
+}
+
+static int amdgpu_acpi_enumerate_xcc(void)
+{
+ struct amdgpu_acpi_dev_info *dev_info = NULL;
+ struct amdgpu_acpi_xcc_info *xcc_info;
+ struct acpi_device *acpi_dev;
+ char hid[ACPI_ID_LEN];
+ int ret, id;
+ u32 sbdf;
+
+ INIT_LIST_HEAD(&amdgpu_acpi_dev_list);
+ xa_init(&numa_info_xa);
+
+ for (id = 0; id < AMD_XCC_MAX_HID; id++) {
+ sprintf(hid, "%s%d", "AMD", AMD_XCC_HID_START + id);
+ acpi_dev = acpi_dev_get_first_match_dev(hid, NULL, -1);
+ /* These ACPI objects are expected to be in sequential order. If
+ * one is not found, no need to check the rest.
+ */
+ if (!acpi_dev) {
+ DRM_DEBUG_DRIVER("No matching acpi device found for %s",
+ hid);
+ break;
+ }
+
+ xcc_info = kzalloc(sizeof(struct amdgpu_acpi_xcc_info),
+ GFP_KERNEL);
+ if (!xcc_info) {
+ DRM_ERROR("Failed to allocate memory for xcc info\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&xcc_info->list);
+ xcc_info->handle = acpi_device_handle(acpi_dev);
+ acpi_dev_put(acpi_dev);
+
+ ret = amdgpu_acpi_get_xcc_info(xcc_info, &sbdf);
+ if (ret) {
+ kfree(xcc_info);
+ continue;
+ }
+
+ dev_info = amdgpu_acpi_get_dev(sbdf);
+
+ if (!dev_info)
+ ret = amdgpu_acpi_dev_init(&dev_info, xcc_info, sbdf);
+
+ if (ret == -ENOMEM)
+ return ret;
+
+ if (!dev_info) {
+ kfree(xcc_info);
+ continue;
+ }
+
+ list_add_tail(&xcc_info->list, &dev_info->xcc_list);
+ }
+
+ return 0;
+}
+
+int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
+ u64 *tmr_size)
+{
+ struct amdgpu_acpi_dev_info *dev_info;
+ u32 sbdf;
+
+ if (!tmr_offset || !tmr_size)
+ return -EINVAL;
+
+ sbdf = (pci_domain_nr(adev->pdev->bus) << 16);
+ sbdf |= pci_dev_id(adev->pdev);
+ dev_info = amdgpu_acpi_get_dev(sbdf);
+ if (!dev_info)
+ return -ENOENT;
+
+ *tmr_offset = dev_info->tmr_base;
+ *tmr_size = dev_info->tmr_size;
+
+ return 0;
+}
+
+int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id,
+ struct amdgpu_numa_info *numa_info)
+{
+ struct amdgpu_acpi_dev_info *dev_info;
+ struct amdgpu_acpi_xcc_info *xcc_info;
+ u32 sbdf;
+
+ if (!numa_info)
+ return -EINVAL;
+
+ sbdf = (pci_domain_nr(adev->pdev->bus) << 16);
+ sbdf |= pci_dev_id(adev->pdev);
+ dev_info = amdgpu_acpi_get_dev(sbdf);
+ if (!dev_info)
+ return -ENOENT;
+
+ list_for_each_entry(xcc_info, &dev_info->xcc_list, list) {
+ if (xcc_info->phy_id == xcc_id) {
+ memcpy(numa_info, xcc_info->numa_info,
+ sizeof(*numa_info));
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+/**
* amdgpu_acpi_event - handle notify events
*
* @nb: notifier block
@@ -638,47 +1241,181 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
*/
int amdgpu_acpi_init(struct amdgpu_device *adev)
{
- acpi_handle handle;
- struct amdgpu_atif *atif = &adev->atif;
- struct amdgpu_atcs *atcs = &adev->atcs;
+ struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
+
+ if (atif->notifications.brightness_change) {
+ if (adev->dc_enabled) {
+#if defined(CONFIG_DRM_AMD_DC)
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ if (dm->backlight_dev[0])
+ atif->bd = dm->backlight_dev[0];
+#endif
+ } else {
+ struct drm_encoder *tmp;
+
+ /* Find the encoder controlling the brightness */
+ list_for_each_entry(tmp, &adev_to_drm(adev)->mode_config.encoder_list,
+ head) {
+ struct amdgpu_encoder *enc = to_amdgpu_encoder(tmp);
+
+ if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
+ enc->enc_priv) {
+ struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+
+ if (dig->bl_dev) {
+ atif->bd = dig->bl_dev;
+ break;
+ }
+ }
+ }
+ }
+ }
+ adev->acpi_nb.notifier_call = amdgpu_acpi_event;
+ register_acpi_notifier(&adev->acpi_nb);
+
+ return 0;
+}
+
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps)
+{
+ struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
+
+ memcpy(caps, &atif->backlight_caps, sizeof(*caps));
+}
+
+/**
+ * amdgpu_acpi_fini - tear down driver acpi support
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Unregisters with the acpi notifier chain (all asics).
+ */
+void amdgpu_acpi_fini(struct amdgpu_device *adev)
+{
+ unregister_acpi_notifier(&adev->acpi_nb);
+}
+
+/**
+ * amdgpu_atif_pci_probe_handle - look up the ATIF handle
+ *
+ * @pdev: pci device
+ *
+ * Look up the ATIF handles (all asics).
+ * Returns true if the handle is found, false if not.
+ */
+static bool amdgpu_atif_pci_probe_handle(struct pci_dev *pdev)
+{
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+ acpi_handle dhandle, atif_handle;
+ acpi_status status;
int ret;
- /* Get the device handle */
- handle = ACPI_HANDLE(&adev->pdev->dev);
+ dhandle = ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
- if (!adev->bios || !handle)
- return 0;
+ status = acpi_get_handle(dhandle, "ATIF", &atif_handle);
+ if (ACPI_FAILURE(status))
+ return false;
- /* Call the ATCS method */
- ret = amdgpu_atcs_verify_interface(handle, atcs);
+ amdgpu_acpi_priv.atif.handle = atif_handle;
+ acpi_get_name(amdgpu_acpi_priv.atif.handle, ACPI_FULL_PATHNAME, &buffer);
+ DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
+ ret = amdgpu_atif_verify_interface(&amdgpu_acpi_priv.atif);
if (ret) {
- DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
+ amdgpu_acpi_priv.atif.handle = 0;
+ return false;
}
+ return true;
+}
+
+/**
+ * amdgpu_atcs_pci_probe_handle - look up the ATCS handle
+ *
+ * @pdev: pci device
+ *
+ * Look up the ATCS handle (all asics).
+ * Returns true if the handle is found, false if not.
+ */
+static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
+{
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
+ acpi_handle dhandle, atcs_handle;
+ acpi_status status;
+ int ret;
+
+ dhandle = ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
- /* Call the ATIF method */
- ret = amdgpu_atif_verify_interface(handle, atif);
+ status = acpi_get_handle(dhandle, "ATCS", &atcs_handle);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ amdgpu_acpi_priv.atcs.handle = atcs_handle;
+ acpi_get_name(amdgpu_acpi_priv.atcs.handle, ACPI_FULL_PATHNAME, &buffer);
+ DRM_DEBUG_DRIVER("Found ATCS handle %s\n", acpi_method_name);
+ ret = amdgpu_atcs_verify_interface(&amdgpu_acpi_priv.atcs);
if (ret) {
- DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
- goto out;
+ amdgpu_acpi_priv.atcs.handle = 0;
+ return false;
}
+ return true;
+}
- if (atif->notifications.brightness_change) {
- struct drm_encoder *tmp;
-
- /* Find the encoder controlling the brightness */
- list_for_each_entry(tmp, &adev->ddev->mode_config.encoder_list,
- head) {
- struct amdgpu_encoder *enc = to_amdgpu_encoder(tmp);
-
- if ((enc->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
- enc->enc_priv) {
- struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
- if (dig->bl_dev) {
- atif->encoder_for_bl = enc;
- break;
- }
- }
- }
+
+/**
+ * amdgpu_acpi_should_gpu_reset
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the GPU should be reset, false if not.
+ */
+bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev)
+{
+ if ((adev->flags & AMD_IS_APU) &&
+ adev->gfx.imu.funcs) /* No need to do mode2 reset for IMU-enabled APUs */
+ return false;
+
+ if ((adev->flags & AMD_IS_APU) &&
+ amdgpu_acpi_is_s3_active(adev))
+ return false;
+
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+#if IS_ENABLED(CONFIG_SUSPEND)
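+ /* Only a suspend state deeper than suspend-to-idle warrants a reset */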
+ return pm_suspend_target_state != PM_SUSPEND_TO_IDLE;
+#else
+ return true;
+#endif
+}
+
+/*
+ * amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods
+ *
+ * Check if we have the ATIF/ATCS methods and populate
+ * the structures in the driver.
+ */
+void amdgpu_acpi_detect(void)
+{
+ struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
+ struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
+ struct pci_dev *pdev = NULL;
+ int ret;
+
+ while ((pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev))) {
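+ /* Only probe VGA and "other display" class functions for ATIF/ATCS */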
+ if ((pdev->class != PCI_CLASS_DISPLAY_VGA << 8) &&
+ (pdev->class != PCI_CLASS_DISPLAY_OTHER << 8))
+ continue;
+
+ if (!atif->handle)
+ amdgpu_atif_pci_probe_handle(pdev);
+ if (!atcs->handle)
+ amdgpu_atcs_pci_probe_handle(pdev);
}
if (atif->functions.sbios_requests && !atif->functions.system_params) {
@@ -690,8 +1427,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
if (atif->functions.system_params) {
- ret = amdgpu_atif_get_notification_params(handle,
- &atif->notification_cfg);
+ ret = amdgpu_atif_get_notification_params(atif);
if (ret) {
DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
ret);
@@ -700,21 +1436,131 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
}
-out:
- adev->acpi_nb.notifier_call = amdgpu_acpi_event;
- register_acpi_notifier(&adev->acpi_nb);
+ if (atif->functions.query_backlight_transfer_characteristics) {
+ ret = amdgpu_atif_query_backlight_caps(atif);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Call to QUERY_BACKLIGHT_TRANSFER_CHARACTERISTICS failed: %d\n",
+ ret);
+ atif->backlight_caps.caps_valid = false;
+ }
+ } else {
+ atif->backlight_caps.caps_valid = false;
+ }
- return ret;
+ amdgpu_acpi_enumerate_xcc();
+}
+
+void amdgpu_acpi_release(void)
+{
+ struct amdgpu_acpi_dev_info *dev_info, *dev_tmp;
+ struct amdgpu_acpi_xcc_info *xcc_info, *xcc_tmp;
+ struct amdgpu_numa_info *numa_info;
+ unsigned long index;
+
+ xa_for_each(&numa_info_xa, index, numa_info) {
+ kfree(numa_info);
+ xa_erase(&numa_info_xa, index);
+ }
+
+ if (list_empty(&amdgpu_acpi_dev_list))
+ return;
+
+ list_for_each_entry_safe(dev_info, dev_tmp, &amdgpu_acpi_dev_list,
+ list) {
+ list_for_each_entry_safe(xcc_info, xcc_tmp, &dev_info->xcc_list,
+ list) {
+ list_del(&xcc_info->list);
+ kfree(xcc_info);
+ }
+
+ list_del(&dev_info->list);
+ kfree(dev_info);
+ }
}
+#if IS_ENABLED(CONFIG_SUSPEND)
/**
- * amdgpu_acpi_fini - tear down driver acpi support
+ * amdgpu_acpi_is_s3_active
*
- * @adev: amdgpu_device pointer
+ * @adev: amdgpu_device pointer
*
- * Unregisters with the acpi notifier chain (all asics).
+ * Returns true if S3 handling applies to this device, false if not.
*/
-void amdgpu_acpi_fini(struct amdgpu_device *adev)
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
{
- unregister_acpi_notifier(&adev->acpi_nb);
+ return !(adev->flags & AMD_IS_APU) ||
+ (pm_suspend_target_state == PM_SUSPEND_MEM);
+}
+
+/**
+ * amdgpu_acpi_is_s0ix_active
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if S0ix is in use for this suspend, false if not.
+ */
+bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
+{
+ if (!(adev->flags & AMD_IS_APU) ||
+ (pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
+ return false;
+
+ if (adev->asic_type < CHIP_RAVEN)
+ return false;
+
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ return false;
+
+ /*
+ * If ACPI_FADT_LOW_POWER_S0 is not set in the FADT, it is generally
+ * risky to do any special firmware-related preparations for entering
+ * S0ix even though the system is suspending to idle, so return false
+ * in that case.
+ */
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+ dev_err_once(adev->dev,
+ "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
+ "To use suspend-to-idle change the sleep mode in BIOS setup.\n");
+ return false;
+ }
+
+#if !IS_ENABLED(CONFIG_AMD_PMC)
+ dev_err_once(adev->dev,
+ "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
+ return false;
+#else
+ return true;
+#endif /* CONFIG_AMD_PMC */
+}
+#endif /* CONFIG_SUSPEND */
+
+#if IS_ENABLED(CONFIG_DRM_AMD_ISP)
+static const struct acpi_device_id isp_sensor_ids[] = {
+ { "OMNI5C10" },
+ { }
+};
+
+static int isp_match_acpi_device_ids(struct device *dev, const void *data)
+{
+ return acpi_match_device(data, dev) ? 1 : 0;
+}
+
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev)
+{
+ struct device *pdev __free(put_device) = NULL;
+ struct acpi_device *acpi_pdev;
+
+ pdev = bus_find_device(&platform_bus_type, NULL, isp_sensor_ids,
+ isp_match_acpi_device_ids);
+ if (!pdev)
+ return -EINVAL;
+
+ acpi_pdev = ACPI_COMPANION(pdev);
+ if (!acpi_pdev)
+ return -ENODEV;
+
+ *dev = acpi_pdev;
+
+ return 0;
}
+#endif /* CONFIG_DRM_AMD_ISP */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
index 3889486f71fe..80771b1480ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_afmt.c
@@ -25,7 +25,7 @@
*/
#include <linux/hdmi.h>
#include <linux/gcd.h>
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -100,6 +100,7 @@ struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock)
amdgpu_afmt_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
amdgpu_afmt_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
amdgpu_afmt_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
+ res.clock = clock;
return res;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index dba8a5b25e66..a2879d2b7c8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
@@ -21,226 +22,891 @@
*/
#include "amdgpu_amdkfd.h"
+#include "amd_pcie.h"
#include "amd_shared.h"
-#include <drm/drmP.h>
+
#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_dma_buf.h"
+#include <drm/ttm/ttm_tt.h>
#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include "amdgpu_xgmi.h"
+#include <uapi/linux/kfd_ioctl.h>
+#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
+#include "amdgpu_reset.h"
+
+/* Total memory size in system memory and all GPU VRAM. Used to
+ * estimate worst case amount of memory to reserve for page tables
+ */
+uint64_t amdgpu_amdkfd_total_mem_size;
-const struct kfd2kgd_calls *kfd2kgd;
-const struct kgd2kfd_calls *kgd2kfd;
-bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+static bool kfd_initialized;
int amdgpu_amdkfd_init(void)
{
+ struct sysinfo si;
int ret;
-#if defined(CONFIG_HSA_AMD_MODULE)
- int (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
+ si_meminfo(&si);
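+ /* Start from free low memory; each GPU's VRAM is added in amdgpu_amdkfd_device_init() */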
+ amdgpu_amdkfd_total_mem_size = si.freeram - si.freehigh;
+ amdgpu_amdkfd_total_mem_size *= si.mem_unit;
- kgd2kfd_init_p = symbol_request(kgd2kfd_init);
+ ret = kgd2kfd_init();
+ kfd_initialized = !ret;
- if (kgd2kfd_init_p == NULL)
- return -ENOENT;
+ return ret;
+}
- ret = kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd);
- if (ret) {
- symbol_put(kgd2kfd_init);
- kgd2kfd = NULL;
+void amdgpu_amdkfd_fini(void)
+{
+ if (kfd_initialized) {
+ kgd2kfd_exit();
+ kfd_initialized = false;
}
+}
-#elif defined(CONFIG_HSA_AMD)
- ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
- if (ret)
- kgd2kfd = NULL;
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
+{
+ bool vf = amdgpu_sriov_vf(adev);
-#else
- ret = -ENOENT;
-#endif
+ if (!kfd_initialized)
+ return;
- return ret;
+ adev->kfd.dev = kgd2kfd_probe(adev, vf);
}
-bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
+/**
+ * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
+ * setup amdkfd
+ *
+ * @adev: amdgpu_device pointer
+ * @aperture_base: output returning doorbell aperture base physical address
+ * @aperture_size: output returning doorbell aperture size in bytes
+ * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
+ *
+ * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
+ * takes doorbells required for its own rings and reports the setup to amdkfd.
+ * amdgpu reserved doorbells are at the start of the doorbell aperture.
+ */
+static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
+ phys_addr_t *aperture_base,
+ size_t *aperture_size,
+ size_t *start_offset)
{
- switch (rdev->asic_type) {
-#ifdef CONFIG_DRM_AMDGPU_CIK
- case CHIP_KAVERI:
- kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
- break;
-#endif
- case CHIP_CARRIZO:
- kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
- break;
- default:
- return false;
+ /*
+ * The first num_kernel_doorbells are used by amdgpu.
+ * amdkfd takes whatever's left in the aperture.
+ */
+ if (adev->enable_mes) {
+ /*
+ * With MES enabled, we only need to initialize
+ * the base address. The size and offset are
+ * not initialized as AMDGPU manages the whole
+ * doorbell space.
+ */
+ *aperture_base = adev->doorbell.base;
+ *aperture_size = 0;
+ *start_offset = 0;
+ } else if (adev->doorbell.size > adev->doorbell.num_kernel_doorbells *
+ sizeof(u32)) {
+ *aperture_base = adev->doorbell.base;
+ *aperture_size = adev->doorbell.size;
+ *start_offset = adev->doorbell.num_kernel_doorbells * sizeof(u32);
+ } else {
+ *aperture_base = 0;
+ *aperture_size = 0;
+ *start_offset = 0;
}
-
- return true;
}
-void amdgpu_amdkfd_fini(void)
+
+static void amdgpu_amdkfd_reset_work(struct work_struct *work)
{
- if (kgd2kfd) {
- kgd2kfd->exit();
- symbol_put(kgd2kfd_init);
- }
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+ kfd.reset_work);
+
+ struct amdgpu_reset_context reset_context;
+
+ memset(&reset_context, 0, sizeof(reset_context));
+
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ reset_context.src = adev->enable_mes ?
+ AMDGPU_RESET_SRC_MES :
+ AMDGPU_RESET_SRC_HWS;
+ clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
+ amdgpu_device_gpu_recover(adev, NULL, &reset_context);
}
-void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
+static const struct drm_client_funcs kfd_client_funcs = {
+ .unregister = drm_client_release,
+};
+
+int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
{
- if (kgd2kfd)
- rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
- rdev->pdev, kfd2kgd);
+ int ret;
+
+ if (!adev->kfd.init_complete || adev->kfd.client.dev)
+ return 0;
+
+ ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
+ &kfd_client_funcs);
+ if (ret) {
+ dev_err(adev->dev, "Failed to init DRM client: %d\n",
+ ret);
+ return ret;
+ }
+
+ drm_client_register(&adev->kfd.client);
+
+ return 0;
}
-void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
+void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
- if (rdev->kfd) {
- struct kgd2kfd_shared_resources gpu_resources = {
- .compute_vmid_bitmap = 0xFF00,
+ int i;
+ int last_valid_bit;
+
+ amdgpu_amdkfd_gpuvm_init_mem_limits();
- .first_compute_pipe = 1,
- .compute_pipe_count = 4 - 1,
+ if (adev->kfd.dev) {
+ struct kgd2kfd_shared_resources gpu_resources = {
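+ /* VMIDs first_kfd_vmid..AMDGPU_NUM_VMID-1 are reserved for KFD */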
+ .compute_vmid_bitmap =
+ ((1 << AMDGPU_NUM_VMID) - 1) -
+ ((1 << adev->vm_manager.first_kfd_vmid) - 1),
+ .num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
+ .num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
+ .gpuvm_size = min(adev->vm_manager.max_pfn
+ << AMDGPU_GPU_PAGE_SHIFT,
+ AMDGPU_GMC_HOLE_START),
+ .drm_render_minor = adev_to_drm(adev)->render->index,
+ .sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
+ .enable_mes = adev->enable_mes,
};
- amdgpu_doorbell_get_kfd_info(rdev,
+ /* this is going to have a few of the MSBs set that we need to
+ * clear
+ */
+ bitmap_complement(gpu_resources.cp_queue_bitmap,
+ adev->gfx.mec_bitmap[0].queue_bitmap,
+ AMDGPU_MAX_QUEUES);
+
+ /* According to linux/bitmap.h we shouldn't use bitmap_clear if
+ * nbits is not a compile-time constant
+ */
+ last_valid_bit = 1 /* only first MEC can have compute queues */
+ * adev->gfx.mec.num_pipe_per_mec
+ * adev->gfx.mec.num_queue_per_pipe;
+ for (i = last_valid_bit; i < AMDGPU_MAX_QUEUES; ++i)
+ clear_bit(i, gpu_resources.cp_queue_bitmap);
+
+ amdgpu_doorbell_get_kfd_info(adev,
&gpu_resources.doorbell_physical_address,
&gpu_resources.doorbell_aperture_size,
&gpu_resources.doorbell_start_offset);
- kgd2kfd->device_init(rdev->kfd, &gpu_resources);
+ /* Since SOC15, BIF starts to statically use the
+ * lower 12 bits of doorbell addresses for routing
+ * based on settings in registers like
+ * SDMA0_DOORBELL_RANGE etc..
+ * In order to route a doorbell to CP engine, the lower
+ * 12 bits of its address have to be outside the range
+ * set for SDMA, VCN, and IH blocks.
+ */
+ if (adev->asic_type >= CHIP_VEGA10) {
+ gpu_resources.non_cp_doorbells_start =
+ adev->doorbell_index.first_non_cp;
+ gpu_resources.non_cp_doorbells_end =
+ adev->doorbell_index.last_non_cp;
+ }
+
+ adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
+ &gpu_resources);
+
+ amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
+
+ INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);
}
}
-void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
+void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev)
{
- if (rdev->kfd) {
- kgd2kfd->device_exit(rdev->kfd);
- rdev->kfd = NULL;
+ if (adev->kfd.dev) {
+ kgd2kfd_device_exit(adev->kfd.dev);
+ adev->kfd.dev = NULL;
+ amdgpu_amdkfd_total_mem_size -= adev->gmc.real_vram_size;
}
}
-void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry)
{
- if (rdev->kfd)
- kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
+ if (adev->kfd.dev)
+ kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
+}
+
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
+{
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ kgd2kfd_stop_sched_all_nodes(adev->kfd.dev);
+ else
+ kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
+ }
+}
+
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
+{
+ int r = 0;
+
+ if (adev->kfd.dev) {
+ if (adev->in_s0ix)
+ r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev);
+ else
+ r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+ }
+
+ return r;
+}
+
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev)
+{
+ if (adev->kfd.dev)
+ kgd2kfd_suspend_process(adev->kfd.dev);
+}
+
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->kfd.dev)
+ r = kgd2kfd_resume_process(adev->kfd.dev);
+
+ return r;
}
-void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
+int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context)
{
- if (rdev->kfd)
- kgd2kfd->suspend(rdev->kfd);
+ int r = 0;
+
+ if (adev->kfd.dev)
+ r = kgd2kfd_pre_reset(adev->kfd.dev, reset_context);
+
+ return r;
}
-int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
+int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
int r = 0;
- if (rdev->kfd)
- r = kgd2kfd->resume(rdev->kfd);
+ if (adev->kfd.dev)
+ r = kgd2kfd_post_reset(adev->kfd.dev);
return r;
}
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
- void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr)
+void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev)
{
- struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
- struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
+ if (amdgpu_device_should_recover_gpu(adev))
+ amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->kfd.reset_work);
+}
+
+int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr, bool cp_mqd_gfx9)
+{
+ struct amdgpu_bo *bo = NULL;
+ struct amdgpu_bo_param bp;
int r;
+ void *cpu_ptr_tmp = NULL;
- BUG_ON(kgd == NULL);
- BUG_ON(gpu_addr == NULL);
- BUG_ON(cpu_ptr == NULL);
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+ bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
- *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
- if ((*mem) == NULL)
- return -ENOMEM;
+ if (cp_mqd_gfx9)
+ bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;
- r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
+ r = amdgpu_bo_create(adev, &bp, &bo);
if (r) {
- dev_err(rdev->dev,
+ dev_err(adev->dev,
"failed to allocate BO for amdkfd (%d)\n", r);
return r;
}
/* map the buffer */
- r = amdgpu_bo_reserve((*mem)->bo, true);
+ r = amdgpu_bo_reserve(bo, true);
if (r) {
- dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
+ dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
goto allocate_mem_reserve_bo_failed;
}
- r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
- &(*mem)->gpu_addr);
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
if (r) {
- dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
+ dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
goto allocate_mem_pin_bo_failed;
}
- *gpu_addr = (*mem)->gpu_addr;
- r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
+ r = amdgpu_ttm_alloc_gart(&bo->tbo);
if (r) {
- dev_err(rdev->dev,
+ dev_err(adev->dev, "%p bind failed\n", bo);
+ goto allocate_mem_kmap_bo_failed;
+ }
+
+ r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
+ if (r) {
+ dev_err(adev->dev,
"(%d) failed to map bo to kernel for amdkfd\n", r);
goto allocate_mem_kmap_bo_failed;
}
- *cpu_ptr = (*mem)->cpu_ptr;
- amdgpu_bo_unreserve((*mem)->bo);
+ *mem_obj = bo;
+ *gpu_addr = amdgpu_bo_gpu_offset(bo);
+ *cpu_ptr = cpu_ptr_tmp;
+
+ amdgpu_bo_unreserve(bo);
return 0;
allocate_mem_kmap_bo_failed:
- amdgpu_bo_unpin((*mem)->bo);
+ amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
- amdgpu_bo_unreserve((*mem)->bo);
+ amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
- amdgpu_bo_unref(&(*mem)->bo);
+ amdgpu_bo_unref(&bo);
return r;
}
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj)
{
- struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
+ struct amdgpu_bo **bo = (struct amdgpu_bo **) mem_obj;
+
+ if (!bo || !*bo)
+ return;
+
+ (void)amdgpu_bo_reserve(*bo, true);
+ amdgpu_bo_kunmap(*bo);
+ amdgpu_bo_unpin(*bo);
+ amdgpu_bo_unreserve(*bo);
+ amdgpu_bo_unref(bo);
+}
+
+int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
+ void **mem_obj)
+{
+ struct amdgpu_bo *bo = NULL;
+ struct amdgpu_bo_user *ubo;
+ struct amdgpu_bo_param bp;
+ int r;
+
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+ bp.byte_align = 1;
+ bp.domain = AMDGPU_GEM_DOMAIN_GWS;
+ bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ bp.type = ttm_bo_type_device;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create_user(adev, &bp, &ubo);
+ if (r) {
+ dev_err(adev->dev,
+ "failed to allocate gws BO for amdkfd (%d)\n", r);
+ return r;
+ }
+
+ bo = &ubo->bo;
+ *mem_obj = bo;
+ return 0;
+}
- BUG_ON(mem == NULL);
+void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj)
+{
+ struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;
- amdgpu_bo_reserve(mem->bo, true);
- amdgpu_bo_kunmap(mem->bo);
- amdgpu_bo_unpin(mem->bo);
- amdgpu_bo_unreserve(mem->bo);
- amdgpu_bo_unref(&(mem->bo));
- kfree(mem);
+ amdgpu_bo_unref(&bo);
}
-uint64_t get_vmem_size(struct kgd_dev *kgd)
+uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
+ enum kgd_engine_type type)
{
- struct amdgpu_device *rdev =
- (struct amdgpu_device *)kgd;
+ switch (type) {
+ case KGD_ENGINE_PFP:
+ return adev->gfx.pfp_fw_version;
+
+ case KGD_ENGINE_ME:
+ return adev->gfx.me_fw_version;
+
+ case KGD_ENGINE_CE:
+ return adev->gfx.ce_fw_version;
+
+ case KGD_ENGINE_MEC1:
+ return adev->gfx.mec_fw_version;
- BUG_ON(kgd == NULL);
+ case KGD_ENGINE_MEC2:
+ return adev->gfx.mec2_fw_version;
- return rdev->mc.real_vram_size;
+ case KGD_ENGINE_RLC:
+ return adev->gfx.rlc_fw_version;
+
+ case KGD_ENGINE_SDMA1:
+ return adev->sdma.instance[0].fw_version;
+
+ case KGD_ENGINE_SDMA2:
+ return adev->sdma.instance[1].fw_version;
+
+ default:
+ return 0;
+ }
+
+ return 0;
}
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
+void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
+ struct kfd_local_mem_info *mem_info,
+ struct amdgpu_xcp *xcp)
{
- struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+ memset(mem_info, 0, sizeof(*mem_info));
+
+ if (xcp) {
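+ /* "public" memory is host-visible; report the XCP share as public only when all VRAM is visible */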
+ if (adev->gmc.real_vram_size == adev->gmc.visible_vram_size)
+ mem_info->local_mem_size_public =
+ KFD_XCP_MEMORY_SIZE(adev, xcp->id);
+ else
+ mem_info->local_mem_size_private =
+ KFD_XCP_MEMORY_SIZE(adev, xcp->id);
+ } else if (adev->apu_prefer_gtt) {
+ mem_info->local_mem_size_public = (ttm_tt_pages_limit() << PAGE_SHIFT);
+ mem_info->local_mem_size_private = 0;
+ } else {
+ mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
+ mem_info->local_mem_size_private = adev->gmc.real_vram_size -
+ adev->gmc.visible_vram_size;
+ }
+ mem_info->vram_width = adev->gmc.vram_width;
+
+ pr_debug("Address base: %pap public 0x%llx private 0x%llx\n",
+ &adev->gmc.aper_base,
+ mem_info->local_mem_size_public,
+ mem_info->local_mem_size_private);
+
+ if (adev->pm.dpm_enabled) {
+ if (amdgpu_emu_mode == 1)
+ mem_info->mem_clk_max = 0;
+ else
+ mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
+ } else
+ mem_info->mem_clk_max = 100;
+}
- if (rdev->gfx.funcs->get_gpu_clock_counter)
- return rdev->gfx.funcs->get_gpu_clock_counter(rdev);
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev)
+{
+ if (adev->gfx.funcs->get_gpu_clock_counter)
+ return adev->gfx.funcs->get_gpu_clock_counter(adev);
return 0;
}
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev)
{
- struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
+ /* the sclk is reported in quanta of 10kHz */
+ if (adev->pm.dpm_enabled)
+ return amdgpu_dpm_get_sclk(adev, false) / 100;
+ else
+ return 100;
+}
- /* The sclk is in quantas of 10kHz */
- return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
+int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
+ struct amdgpu_device **dmabuf_adev,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags, int8_t *xcp_id)
+{
+ struct dma_buf *dma_buf;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ uint64_t metadata_flags;
+ int r = -EINVAL;
+
+ dma_buf = dma_buf_get(dma_buf_fd);
+ if (IS_ERR(dma_buf))
+ return PTR_ERR(dma_buf);
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* Can't handle non-graphics buffers */
+ goto out_put;
+
+ obj = dma_buf->priv;
+ if (obj->dev->driver != adev_to_drm(adev)->driver)
+ /* Can't handle buffers from different drivers */
+ goto out_put;
+
+ adev = drm_to_adev(obj->dev);
+ bo = gem_to_amdgpu_bo(obj);
+ if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT)))
+ /* Only VRAM and GTT BOs are supported */
+ goto out_put;
+
+ r = 0;
+ if (dmabuf_adev)
+ *dmabuf_adev = adev;
+ if (bo_size)
+ *bo_size = amdgpu_bo_size(bo);
+ if (metadata_buffer)
+ r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
+ metadata_size, &metadata_flags);
+ if (flags) {
+ *flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM
+ : KFD_IOC_ALLOC_MEM_FLAGS_GTT;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
+ *flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
+ }
+ if (xcp_id)
+ *xcp_id = bo->xcp_id;
+
+out_put:
+ dma_buf_put(dma_buf);
+ return r;
+}
+
+int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min)
+{
+ int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
+ fls(adev->pm.pcie_mlw_mask)) - 1;
+ int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask &
+ CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) :
+ fls(adev->pm.pcie_gen_mask &
+ CAIL_PCIE_LINK_SPEED_SUPPORT_MASK)) - 1;
+ uint32_t num_lanes_mask = 1 << num_lanes_shift;
+ uint32_t gen_speed_mask = 1 << gen_speed_shift;
+ int num_lanes_factor = 0, gen_speed_mbits_factor = 0;
+
+ switch (num_lanes_mask) {
+ case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1:
+ num_lanes_factor = 1;
+ break;
+ case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2:
+ num_lanes_factor = 2;
+ break;
+ case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4:
+ num_lanes_factor = 4;
+ break;
+ case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8:
+ num_lanes_factor = 8;
+ break;
+ case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12:
+ num_lanes_factor = 12;
+ break;
+ case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16:
+ num_lanes_factor = 16;
+ break;
+ case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32:
+ num_lanes_factor = 32;
+ break;
+ }
+
+ switch (gen_speed_mask) {
+ case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1:
+ gen_speed_mbits_factor = 2500;
+ break;
+ case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2:
+ gen_speed_mbits_factor = 5000;
+ break;
+ case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3:
+ gen_speed_mbits_factor = 8000;
+ break;
+ case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4:
+ gen_speed_mbits_factor = 16000;
+ break;
+ case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5:
+ gen_speed_mbits_factor = 32000;
+ break;
+ }
+
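+ /* lanes * per-lane rate in Mbit/s, divided by 8 to report MB/s */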
+ return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE;
+}
+
+int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
+ enum kgd_engine_type engine,
+ uint32_t vmid, uint64_t gpu_addr,
+ uint32_t *ib_cmd, uint32_t ib_len)
+{
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct amdgpu_ring *ring;
+ struct dma_fence *f = NULL;
+ int ret;
+
+ switch (engine) {
+ case KGD_ENGINE_MEC1:
+ ring = &adev->gfx.compute_ring[0];
+ break;
+ case KGD_ENGINE_SDMA1:
+ ring = &adev->sdma.instance[0].ring;
+ break;
+ case KGD_ENGINE_SDMA2:
+ ring = &adev->sdma.instance[1].ring;
+ break;
+ default:
+ pr_err("Invalid engine in IB submission: %d\n", engine);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job, 0);
+ if (ret)
+ goto err;
+
+ ib = &job->ibs[0];
+ memset(ib, 0, sizeof(struct amdgpu_ib));
+
+ ib->gpu_addr = gpu_addr;
+ ib->ptr = ib_cmd;
+ ib->length_dw = ib_len;
+ /* This works for NO_HWS. TODO: need to handle without knowing VMID */
+ job->vmid = vmid;
+ job->num_ibs = 1;
+
+ ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
+
+ if (ret) {
+ DRM_ERROR("amdgpu: failed to schedule IB.\n");
+ goto err_ib_sched;
+ }
+
+ /* Drop the initial kref_init count (see drm_sched_main as example) */
+ dma_fence_put(f);
+ ret = dma_fence_wait(f, false);
+
+err_ib_sched:
+ amdgpu_job_free(job);
+err:
+ return ret;
+}
+
+void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle)
+{
+ enum amd_powergating_state state = idle ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE;
+ if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 11 &&
+ ((adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK) <= 64)) ||
+ (IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 12)) {
+ pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
+ amdgpu_gfx_off_ctrl(adev, idle);
+ } else if ((IP_VERSION_MAJ(amdgpu_ip_version(adev, GC_HWIP, 0)) == 9) &&
+ (adev->flags & AMD_IS_APU)) {
+ /* Disable GFXOFF and PG. Temporary workaround
+ * to fix an issue seen with some compute applications on GFX9.
+ */
+ struct amdgpu_ip_block *gfx_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ if (gfx_block != NULL)
+ gfx_block->version->funcs->set_powergating_state((void *)gfx_block, state);
+ }
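+ /* Keep the COMPUTE power profile active while compute work is running */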
+ amdgpu_dpm_switch_power_profile(adev,
+ PP_SMC_POWER_PROFILE_COMPUTE,
+ !idle);
+}
+
+bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
+{
+ if (adev->kfd.dev)
+ return vmid >= adev->vm_manager.first_kfd_vmid;
+
+ return false;
+}
+
+bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev)
+{
+ return adev->have_atomics_support;
+}
+
+void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev)
+{
+ amdgpu_device_flush_hdp(adev, NULL);
+}
+
+bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev)
+{
+ return amdgpu_ras_get_fed_status(adev);
+}
+
+void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset)
+{
+ amdgpu_umc_pasid_poison_handler(adev, block, pasid, pasid_fn, data, reset);
+}
+
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint32_t reset)
+{
+ amdgpu_umc_pasid_poison_handler(adev, block, 0, NULL, NULL, reset);
+}
+
+int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
+ uint32_t *payload)
+{
+ int ret;
+
+ /* Device or IH ring is not ready so bail. */
+ ret = amdgpu_ih_wait_on_checkpoint_process_ts(adev, &adev->irq.ih);
+ if (ret)
+ return ret;
+
+ /* Send payload to fence KFD interrupts */
+ amdgpu_amdkfd_interrupt(adev, payload);
+
+ return 0;
+}
+
+int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
+{
+ return kgd2kfd_check_and_lock_kfd(adev->kfd.dev);
+}
+
+void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
+{
+ kgd2kfd_unlock_kfd(adev->kfd.dev);
+}
+
+
+u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id)
+{
+ s8 mem_id = KFD_XCP_MEM_ID(adev, xcp_id);
+ u64 tmp;
+
+ if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) {
+ if (adev->gmc.is_app_apu && adev->gmc.num_mem_partitions == 1) {
+ /* In NPS1 mode, restrict the vram reporting to the
+ * ttm_pages_limit, which is 1/2 of the system
+ * memory. For other partition modes, the HBM is uniformly
+ * divided already per numa node reported. If user wants to
+ * go beyond the default ttm limit and maximize the ROCm
+ * allocations, they can go up to max ttm and sysmem limits.
+ */
+
+ tmp = (ttm_tt_pages_limit() << PAGE_SHIFT) / num_online_nodes();
+ } else {
+ tmp = adev->gmc.mem_partitions[mem_id].size;
+ }
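+ /* Split the partition size evenly among the XCPs sharing it */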
+ do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition);
+ return ALIGN_DOWN(tmp, PAGE_SIZE);
+ } else if (adev->apu_prefer_gtt) {
+ return (ttm_tt_pages_limit() << PAGE_SHIFT);
+ } else {
+ return adev->gmc.real_vram_size;
+ }
+}
+
+int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
+ u32 inst)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ struct amdgpu_ring_funcs *ring_funcs;
+ struct amdgpu_ring *ring;
+ int r = 0;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
+ return 0;
+
+ ring_funcs = kzalloc(sizeof(*ring_funcs), GFP_KERNEL);
+ if (!ring_funcs)
+ return -ENOMEM;
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ r = -ENOMEM;
+ goto free_ring_funcs;
+ }
+
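+ /* Build a minimal stand-in ring so the KIQ unmap packet can target the HIQ by doorbell */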
+ ring_funcs->type = AMDGPU_RING_TYPE_COMPUTE;
+ ring->doorbell_index = doorbell_off;
+ ring->funcs = ring_funcs;
+
+ spin_lock(&kiq->ring_lock);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+ spin_unlock(&kiq->ring_lock);
+ r = -ENOMEM;
+ goto free_ring;
+ }
+
+ kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0);
+
+ /* Submit unmap queue packet */
+ amdgpu_ring_commit(kiq_ring);
+ /*
+ * Ring test will do a basic scratch register change check. Just run
+ * this to ensure that the unmap queues packet submitted above has
+ * been processed before returning.
+ */
+ r = amdgpu_ring_test_helper(kiq_ring);
+
+ spin_unlock(&kiq->ring_lock);
+
+free_ring:
+ kfree(ring);
+
+free_ring_funcs:
+ kfree(ring_funcs);
+
+ return r;
+}
+
+/* Stop scheduling on KFD */
+int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id)
+{
+ if (!adev->kfd.init_complete)
+ return 0;
+
+ return kgd2kfd_stop_sched(adev->kfd.dev, node_id);
+}
+
+/* Start scheduling on KFD */
+int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id)
+{
+ if (!adev->kfd.init_complete)
+ return 0;
+
+ return kgd2kfd_start_sched(adev->kfd.dev, node_id);
+}
+
+/* check if there are KFD queues active */
+bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id)
+{
+ if (!adev->kfd.init_complete)
+ return false;
+
+ return kgd2kfd_compute_active(adev->kfd.dev, node_id);
+}
+
+/* Config CGTT_SQ_CLK_CTRL */
+int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id,
+ bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable)
+{
+ int r;
+
+ if (!adev->kfd.init_complete)
+ return 0;
+
+ r = psp_config_sq_perfmon(&adev->psp, xcp_id, core_override_enable,
+ reg_override_enable, perfmon_override_enable);
+
+ return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index de530f68d4e3..9e120c934cc1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -25,41 +25,526 @@
#ifndef AMDGPU_AMDKFD_H_INCLUDED
#define AMDGPU_AMDKFD_H_INCLUDED
+#include <linux/list.h>
#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/kthread.h>
+#include <linux/workqueue.h>
+#include <linux/mmu_notifier.h>
+#include <linux/memremap.h>
#include <kgd_kfd_interface.h>
+#include <drm/drm_client.h>
+#include "amdgpu_sync.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_xcp.h"
+
+extern uint64_t amdgpu_amdkfd_total_mem_size;
+
+enum TLB_FLUSH_TYPE {
+ TLB_FLUSH_LEGACY = 0,
+ TLB_FLUSH_LIGHTWEIGHT,
+ TLB_FLUSH_HEAVYWEIGHT
+};
struct amdgpu_device;
+struct kfd_process_device;
+struct amdgpu_reset_context;
+
+enum kfd_mem_attachment_type {
+ KFD_MEM_ATT_SHARED, /* Share kgd_mem->bo or another attachment's */
+ KFD_MEM_ATT_USERPTR, /* SG bo to DMA map pages from a userptr bo */
+ KFD_MEM_ATT_DMABUF, /* DMAbuf to DMA map TTM BOs */
+ KFD_MEM_ATT_SG /* Tag to DMA map SG BOs */
+};
+
+struct kfd_mem_attachment {
+ struct list_head list;
+ enum kfd_mem_attachment_type type;
+ bool is_mapped;
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_device *adev;
+ uint64_t va;
+ uint64_t pte_flags;
+};
struct kgd_mem {
+ struct mutex lock;
struct amdgpu_bo *bo;
- uint64_t gpu_addr;
- void *cpu_ptr;
+ struct dma_buf *dmabuf;
+ struct hmm_range *range;
+ struct list_head attachments;
+ /* protected by amdkfd_process_info.lock */
+ struct list_head validate_list;
+ uint32_t domain;
+ unsigned int mapped_to_gpu_memory;
+ uint64_t va;
+
+ uint32_t alloc_flags;
+
+ uint32_t invalid;
+ struct amdkfd_process_info *process_info;
+
+ struct amdgpu_sync sync;
+
+ uint32_t gem_handle;
+ bool aql_queue;
+ bool is_imported;
+};
+
+/* KFD Memory Eviction */
+struct amdgpu_amdkfd_fence {
+ struct dma_fence base;
+ struct mm_struct *mm;
+ spinlock_t lock;
+ char timeline_name[TASK_COMM_LEN];
+ struct svm_range_bo *svm_bo;
+};
+
+struct amdgpu_kfd_dev {
+ struct kfd_dev *dev;
+ int64_t vram_used[MAX_XCP];
+ uint64_t vram_used_aligned[MAX_XCP];
+ bool init_complete;
+ struct work_struct reset_work;
+
+ /* Client for KFD BO GEM handle allocations */
+ struct drm_client_dev client;
+
+ /* HMM page migration MEMORY_DEVICE_PRIVATE mapping
+ * Must be last -- ends in a flexible-array member.
+ */
+ struct dev_pagemap pgmap;
+};
+
+enum kgd_engine_type {
+ KGD_ENGINE_PFP = 1,
+ KGD_ENGINE_ME,
+ KGD_ENGINE_CE,
+ KGD_ENGINE_MEC1,
+ KGD_ENGINE_MEC2,
+ KGD_ENGINE_RLC,
+ KGD_ENGINE_SDMA1,
+ KGD_ENGINE_SDMA2,
+ KGD_ENGINE_MAX
+};
+
+
+struct amdkfd_process_info {
+ /* List head of all VMs that belong to a KFD process */
+ struct list_head vm_list_head;
+ /* List head for all KFD BOs that belong to a KFD process. */
+ struct list_head kfd_bo_list;
+ /* List of userptr BOs that are valid or invalid */
+ struct list_head userptr_valid_list;
+ struct list_head userptr_inval_list;
+ /* Lock to protect kfd_bo_list */
+ struct mutex lock;
+
+ /* Number of VMs */
+ unsigned int n_vms;
+ /* Eviction Fence */
+ struct amdgpu_amdkfd_fence *eviction_fence;
+
+ /* MMU-notifier related fields */
+ struct mutex notifier_lock;
+ uint32_t evicted_bos;
+ struct delayed_work restore_userptr_work;
+ struct pid *pid;
+ bool block_mmu_notifications;
};
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
-bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev);
-
-void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev);
-int amdgpu_amdkfd_resume(struct amdgpu_device *rdev);
-void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc);
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev);
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev);
+void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
-void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev);
-void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev);
-void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev);
+void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
+void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
+void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
+int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev);
+void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev);
+int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
+ enum kgd_engine_type engine,
+ uint32_t vmid, uint64_t gpu_addr,
+ uint32_t *ib_cmd, uint32_t ib_len);
+void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
+bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);
+
+bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
+
+int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context);
+
+int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
+
+void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
+int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
+ int queue_bit);
+struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
+ struct mm_struct *mm,
+ struct svm_range_bo *svm_bo);
+
+int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev);
+#if defined(CONFIG_DEBUG_FS)
+int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
+#endif
+#if IS_ENABLED(CONFIG_HSA_AMD)
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo);
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+ unsigned long cur_seq, struct kgd_mem *mem);
+int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+ uint32_t domain,
+ struct dma_fence *fence);
+#else
+static inline
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
+{
+ return false;
+}
+
+static inline
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
+{
+ return NULL;
+}
+
+static inline
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
+{
+}
+
+static inline
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+ unsigned long cur_seq, struct kgd_mem *mem)
+{
+ return 0;
+}
+static inline
+int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+ uint32_t domain,
+ struct dma_fence *fence)
+{
+ return 0;
+}
+#endif
/* Shared API */
-int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
- void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr);
-void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
-uint64_t get_vmem_size(struct kgd_dev *kgd);
-uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
+int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
+ void **mem_obj, uint64_t *gpu_addr,
+ void **cpu_ptr, bool mqd_gfx9);
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void **mem_obj);
+int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
+ void **mem_obj);
+void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
+int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
+int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
+uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
+ enum kgd_engine_type type);
+void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
+ struct kfd_local_mem_info *mem_info,
+ struct amdgpu_xcp *xcp);
+uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev);
+
+uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev);
+int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
+ struct amdgpu_device **dmabuf_adev,
+ uint64_t *bo_size, void *metadata_buffer,
+ size_t buffer_size, uint32_t *metadata_size,
+ uint32_t *flags, int8_t *xcp_id);
+int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
+int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
+ uint32_t *payload);
+int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
+ u32 inst);
+int amdgpu_amdkfd_start_sched(struct amdgpu_device *adev, uint32_t node_id);
+int amdgpu_amdkfd_stop_sched(struct amdgpu_device *adev, uint32_t node_id);
+int amdgpu_amdkfd_config_sq_perfmon(struct amdgpu_device *adev, uint32_t xcp_id,
+ bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable);
+bool amdgpu_amdkfd_compute_active(struct amdgpu_device *adev, uint32_t node_id);
+
+
+/* Read user wptr from a specified user address space with page fault
+ * disabled. The memory must be pinned and mapped to the hardware when
+ * this is called in hqd_load functions, so it should never fault in
+ * the first place. This resolves a circular lock dependency involving
+ * four locks, including the DQM lock and mmap_lock.
+ */
+#define read_user_wptr(mmptr, wptr, dst) \
+ ({ \
+ bool valid = false; \
+ if ((mmptr) && (wptr)) { \
+ pagefault_disable(); \
+ if ((mmptr) == current->mm) { \
+ valid = !get_user((dst), (wptr)); \
+ } else if (current->flags & PF_KTHREAD) { \
+ kthread_use_mm(mmptr); \
+ valid = !get_user((dst), (wptr)); \
+ kthread_unuse_mm(mmptr); \
+ } \
+ pagefault_enable(); \
+ } \
+ valid; \
+ })
+
+/* GPUVM API */
+#define drm_priv_to_vm(drm_priv) \
+ (&((struct amdgpu_fpriv *) \
+ ((struct drm_file *)(drm_priv))->driver_priv)->vm)
+
+int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
+ struct amdgpu_vm *avm,
+ void **process_info,
+ struct dma_fence **ef);
+uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
+size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
+ uint8_t xcp_id);
+int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ struct amdgpu_device *adev, uint64_t va, uint64_t size,
+ void *drm_priv, struct kgd_mem **mem,
+ uint64_t *offset, uint32_t flags, bool criu_resume);
+int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
+ uint64_t *size);
+int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
+ struct kgd_mem *mem, void *drm_priv);
+int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
+int amdgpu_amdkfd_gpuvm_sync_memory(
+ struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
+int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
+ void **kptr, uint64_t *size);
+void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);
+
+int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart);
+
+int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
+ struct dma_fence __rcu **ef);
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
+ struct kfd_vm_fault_info *info);
+int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
+ uint64_t va, void *drm_priv,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset);
+int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
+ struct dma_buf **dmabuf);
+void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
+int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
+ struct tile_config *config);
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint32_t reset);
+
+void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset);
+
+bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev);
+bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem);
+void amdgpu_amdkfd_block_mmu_notifications(void *p);
+int amdgpu_amdkfd_criu_resume(void *p);
+int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 alloc_flag, int8_t xcp_id);
+void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 alloc_flag, int8_t xcp_id);
+
+u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id);
+
+#define KFD_XCP_MEM_ID(adev, xcp_id) \
+ ((adev)->xcp_mgr && (xcp_id) >= 0 ?\
+ (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1)
+
+#define KFD_XCP_MEMORY_SIZE(adev, xcp_id) amdgpu_amdkfd_xcp_memory_size((adev), (xcp_id))
+
+
+#if IS_ENABLED(CONFIG_HSA_AMD)
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+
+/**
+ * amdgpu_amdkfd_release_notify() - Notify KFD when a GEM object is released
+ *
+ * Allows KFD to release its resources associated with the GEM object.
+ */
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
+void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
+#else
+static inline
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
+{
+}
+
+static inline
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+}
+
+static inline
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
+{
+}
+#endif
+
+#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
+int kgd2kfd_init_zone_device(struct amdgpu_device *adev);
+#else
+static inline
+int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
+{
+ return 0;
+}
+#endif
+
+/* KGD2KFD callbacks */
+int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger);
+int kgd2kfd_resume_mm(struct mm_struct *mm);
+int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
+ struct dma_fence *fence);
+#if IS_ENABLED(CONFIG_HSA_AMD)
+int kgd2kfd_init(void);
+void kgd2kfd_exit(void);
+struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
+bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ const struct kgd2kfd_shared_resources *gpu_resources);
+void kgd2kfd_device_exit(struct kfd_dev *kfd);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc);
+void kgd2kfd_suspend_process(struct kfd_dev *kfd);
+int kgd2kfd_resume_process(struct kfd_dev *kfd);
+int kgd2kfd_pre_reset(struct kfd_dev *kfd,
+ struct amdgpu_reset_context *reset_context);
+int kgd2kfd_post_reset(struct kfd_dev *kfd);
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
+int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
+void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
+int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd);
+int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
+int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd);
+bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
+bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
+ bool retry_fault);
+
+#else
+static inline int kgd2kfd_init(void)
+{
+ return -ENOENT;
+}
+
+static inline void kgd2kfd_exit(void)
+{
+}
+
+static inline
+struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
+{
+ return NULL;
+}
+
+static inline
+bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ const struct kgd2kfd_shared_resources *gpu_resources)
+{
+ return false;
+}
+
+static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
+{
+}
+
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
+{
+}
+
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
+{
+ return 0;
+}
+
+static inline void kgd2kfd_suspend_process(struct kfd_dev *kfd)
+{
+}
+
+static inline int kgd2kfd_resume_process(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
+static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd,
+ struct amdgpu_reset_context *reset_context)
+{
+ return 0;
+}
+
+static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
+static inline
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+}
+
+static inline
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
+{
+}
+
+static inline
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
+{
+}
+
+static inline int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
+static inline void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
+{
+}
+
+static inline int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id)
+{
+ return 0;
+}
+
+static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
+static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id)
+{
+ return 0;
+}
+
+static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd)
+{
+ return 0;
+}
+
+static inline bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
+{
+ return false;
+}
-uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+static inline bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry,
+ bool retry_fault)
+{
+ return false;
+}
+#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
new file mode 100644
index 000000000000..7e9f7a280c1b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_amdkfd_arcturus.h"
+#include "amdgpu_amdkfd_gfx_v9.h"
+#include "amdgpu_amdkfd_aldebaran.h"
+#include "gc/gc_9_4_2_offset.h"
+#include "gc/gc_9_4_2_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+/*
+ * Returns TRAP_EN, EXCP_EN and EXCP_REPLACE.
+ *
+ * restore_dbg_registers is ignored here but is a general interface requirement
+ * for devices that support GFXOFF and where the RLC save/restore list
+ * does not support hw registers for debugging i.e. the driver has to manually
+ * initialize the debug mode registers after it has disabled GFX off during the
+ * debug session.
+ */
+uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
+
+ return data;
+}
+
+/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_aldebaran_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, keep_trap_enabled);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
+
+ return data;
+}
+
+static int kgd_aldebaran_validate_trap_override_request(struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported)
+{
+ *trap_mask_supported &= KFD_DBG_TRAP_MASK_FP_INVALID |
+ KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
+ KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_FP_OVERFLOW |
+ KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
+ KFD_DBG_TRAP_MASK_FP_INEXACT |
+ KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
+ KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION;
+
+ if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR &&
+ trap_override != KFD_DBG_TRAP_OVERRIDE_REPLACE)
+ return -EPERM;
+
+ return 0;
+}
+
+/* Returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_aldebaran_set_wave_launch_trap_override(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_trap_cntl_prev)
+
+{
+ uint32_t data = 0;
+
+ *trap_mask_prev = REG_GET_FIELD(kfd_dbg_trap_cntl_prev, SPI_GDBG_PER_VMID_CNTL, EXCP_EN);
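+ /* Update only the requested bits; preserve the rest of the previous exception mask */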
+ trap_mask_bits = (trap_mask_bits & trap_mask_request) |
+ (*trap_mask_prev & ~trap_mask_request);
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, trap_mask_bits);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, trap_override);
+
+ return data;
+}
+
+uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, LAUNCH_MODE, wave_launch_mode);
+
+ return data;
+}
+
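+/* The TCP_WATCHn register banks are laid out consecutively; this stride lets
+ * watch_id index the correct bank from the TCP_WATCH0 offsets.
+ */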
+#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H)
+static uint32_t kgd_gfx_aldebaran_set_address_watch(
+ struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst)
+{
+ uint32_t watch_address_high;
+ uint32_t watch_address_low;
+ uint32_t watch_address_cntl;
+
+ watch_address_cntl = 0;
+ watch_address_low = lower_32_bits(watch_address);
+ watch_address_high = upper_32_bits(watch_address) & 0xffff;
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MODE,
+ watch_mode);
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MASK,
+ watch_address_mask >> 6);
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VALID,
+ 1);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_H) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_high);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_L) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_low);
+
+ return watch_address_cntl;
+}
+
+const struct kfd2kgd_calls aldebaran_kfd2kgd = {
+ .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_gfx_v9_init_interrupts,
+ .hqd_load = kgd_gfx_v9_hqd_load,
+ .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
+ .hqd_sdma_load = kgd_arcturus_hqd_sdma_load,
+ .hqd_dump = kgd_gfx_v9_hqd_dump,
+ .hqd_sdma_dump = kgd_arcturus_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_gfx_v9_hqd_destroy,
+ .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy,
+ .wave_control_execute = kgd_gfx_v9_wave_control_execute,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
+ .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+ .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
+ .enable_debug_trap = kgd_aldebaran_enable_debug_trap,
+ .disable_debug_trap = kgd_aldebaran_disable_debug_trap,
+ .validate_trap_override_request = kgd_aldebaran_validate_trap_override_request,
+ .set_wave_launch_trap_override = kgd_aldebaran_set_wave_launch_trap_override,
+ .set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode,
+ .set_address_watch = kgd_gfx_aldebaran_set_address_watch,
+ .clear_address_watch = kgd_gfx_v9_clear_address_watch,
+ .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
+ .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h
new file mode 100644
index 000000000000..a7bdaf8d82dd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+uint32_t kgd_aldebaran_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid);
+uint32_t kgd_aldebaran_set_wave_launch_mode(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
new file mode 100644
index 000000000000..1105a09e55dc
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -0,0 +1,424 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_amdkfd_arcturus.h"
+#include "amdgpu_reset.h"
+#include "sdma0/sdma0_4_2_2_offset.h"
+#include "sdma0/sdma0_4_2_2_sh_mask.h"
+#include "sdma1/sdma1_4_2_2_offset.h"
+#include "sdma1/sdma1_4_2_2_sh_mask.h"
+#include "sdma2/sdma2_4_2_2_offset.h"
+#include "sdma2/sdma2_4_2_2_sh_mask.h"
+#include "sdma3/sdma3_4_2_2_offset.h"
+#include "sdma3/sdma3_4_2_2_sh_mask.h"
+#include "sdma4/sdma4_4_2_2_offset.h"
+#include "sdma4/sdma4_4_2_2_sh_mask.h"
+#include "sdma5/sdma5_4_2_2_offset.h"
+#include "sdma5/sdma5_4_2_2_sh_mask.h"
+#include "sdma6/sdma6_4_2_2_offset.h"
+#include "sdma6/sdma6_4_2_2_sh_mask.h"
+#include "sdma7/sdma7_4_2_2_offset.h"
+#include "sdma7/sdma7_4_2_2_sh_mask.h"
+#include "v9_structs.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "amdgpu_amdkfd_gfx_v9.h"
+#include "gfxhub_v1_0.h"
+#include "mmhub_v9_4.h"
+#include "gc/gc_9_0_offset.h"
+#include "gc/gc_9_0_sh_mask.h"
+
+#define HQD_N_REGS 56
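+/* Record a register's byte offset ((addr) << 2) and current value into the
+ * dump array, guarding against writes past HQD_N_REGS entries.
+ */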
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32(addr); \
+ } while (0)
+
+static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
+{
+ return (struct v9_sdma_mqd *)mqd;
+}
+
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
+ unsigned int engine_id,
+ unsigned int queue_id)
+{
+ uint32_t sdma_engine_reg_base = 0;
+ uint32_t sdma_rlc_reg_offset;
+
+ switch (engine_id) {
+ default:
+ dev_warn(adev->dev,
+ "Invalid sdma engine id (%d), using engine id 0\n",
+ engine_id);
+ fallthrough;
+ case 0:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ case 1:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+ mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL;
+ break;
+ case 2:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
+ mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+ break;
+ case 3:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
+ mmSDMA3_RLC0_RB_CNTL) - mmSDMA3_RLC0_RB_CNTL;
+ break;
+ case 4:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA4, 0,
+ mmSDMA4_RLC0_RB_CNTL) - mmSDMA4_RLC0_RB_CNTL;
+ break;
+ case 5:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA5, 0,
+ mmSDMA5_RLC0_RB_CNTL) - mmSDMA5_RLC0_RB_CNTL;
+ break;
+ case 6:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA6, 0,
+ mmSDMA6_RLC0_RB_CNTL) - mmSDMA6_RLC0_RB_CNTL;
+ break;
+ case 7:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA7, 0,
+ mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL;
+ break;
+ }
+
+ sdma_rlc_reg_offset = sdma_engine_reg_base
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
+
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, sdma_rlc_reg_offset);
+
+ return sdma_rlc_reg_offset;
+}
+
+int kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
+{
+ struct v9_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ unsigned long end_jiffies;
+ uint32_t data;
+ uint64_t data64;
+ uint64_t __user *wptr64 = (uint64_t __user *)wptr;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
+
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ m->sdmax_rlcx_doorbell_offset);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ if (read_user_wptr(mm, wptr64, data64)) {
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+ lower_32_bits(data64));
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+ upper_32_bits(data64));
+ } else {
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+ }
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
+ m->sdmax_rlcx_rb_base_hi);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ m->sdmax_rlcx_rb_rptr_addr_lo);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ m->sdmax_rlcx_rb_rptr_addr_hi);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
+
+ return 0;
+}
+
+int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+6+7+10)
+
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
+ reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
+ reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+bool kgd_arcturus_hqd_sdma_is_occupied(struct amdgpu_device *adev,
+ void *mqd)
+{
+ struct v9_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t sdma_rlc_rb_cntl;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+
+ if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+ return true;
+
+ return false;
+}
+
+int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
+ unsigned int utimeout)
+{
+ struct v9_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t temp;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+ temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
+
+ while (true) {
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
+ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
+
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr_hi =
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
+
+ return 0;
+}
+
+/*
+ * Helper used to suspend/resume the gfx pipe for image post-processing work
+ * in order to set barrier behaviour.
+ */
+static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool suspend)
+{
+ int i, r = 0;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
+
+ if (!amdgpu_ring_sched_ready(ring))
+ continue;
+
+		/* stop the scheduler and drain the ring. */
+ if (suspend) {
+ drm_sched_stop(&ring->sched, NULL);
+ r = amdgpu_fence_wait_empty(ring);
+ if (r)
+ goto out;
+ } else {
+ drm_sched_start(&ring->sched, 0);
+ }
+ }
+
+out:
+ /* return on resume or failure to drain rings. */
+ if (!suspend || r)
+ return r;
+
+ return amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX);
+}
+
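+/* Toggle SQ_CONFIG.DISABLE_BARRIER_WAITCNT. KFD queues and the compute
+ * scheduler are quiesced first so no compute work is in flight while the
+ * config register is updated, and everything is resumed afterwards.
+ */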
+static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_waitcnt)
+{
+ uint32_t data;
+
+ WRITE_ONCE(adev->barrier_has_auto_waitcnt, enable_waitcnt);
+
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return;
+
+ amdgpu_amdkfd_suspend(adev, true);
+
+ if (suspend_resume_compute_scheduler(adev, true))
+ goto out;
+
+ data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG));
+ data = REG_SET_FIELD(data, SQ_CONFIG, DISABLE_BARRIER_WAITCNT,
+ !enable_waitcnt);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG), data);
+
+out:
+ suspend_resume_compute_scheduler(adev, false);
+
+ amdgpu_amdkfd_resume(adev, true);
+
+ up_read(&adev->reset_domain->sem);
+}
+
+/*
+ * restore_dbg_registers is ignored here, but it is a general interface
+ * requirement for devices that support GFXOFF and whose RLC save/restore
+ * list does not cover the hw debug registers, i.e. where the driver has to
+ * manually initialize the debug mode registers after it has disabled GFXOFF
+ * during the debug session.
+ */
+static uint32_t kgd_arcturus_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid)
+{
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
+
+ set_barrier_auto_waitcnt(adev, true);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+/*
+ * keep_trap_enabled is ignored here but is a general interface requirement
+ * for devices that support multi-process debugging, where the performance
+ * overhead of temporary trap setup needs to be bypassed when the debug
+ * session has ended.
+ */
+static uint32_t kgd_arcturus_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid)
+{
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
+
+ set_barrier_auto_waitcnt(adev, false);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+const struct kfd2kgd_calls arcturus_kfd2kgd = {
+ .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_gfx_v9_init_interrupts,
+ .hqd_load = kgd_gfx_v9_hqd_load,
+ .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
+ .hqd_sdma_load = kgd_arcturus_hqd_sdma_load,
+ .hqd_dump = kgd_gfx_v9_hqd_dump,
+ .hqd_sdma_dump = kgd_arcturus_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_gfx_v9_hqd_destroy,
+ .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy,
+ .wave_control_execute = kgd_gfx_v9_wave_control_execute,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
+ .set_vm_context_page_table_base =
+ kgd_gfx_v9_set_vm_context_page_table_base,
+ .enable_debug_trap = kgd_arcturus_enable_debug_trap,
+ .disable_debug_trap = kgd_arcturus_disable_debug_trap,
+ .validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request,
+ .set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override,
+ .set_wave_launch_mode = kgd_gfx_v9_set_wave_launch_mode,
+ .set_address_watch = kgd_gfx_v9_set_address_watch,
+ .clear_address_watch = kgd_gfx_v9_clear_address_watch,
+ .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
+ .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
+ .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
new file mode 100644
index 000000000000..756c1a5679c0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+int kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm);
+int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
+bool kgd_arcturus_hqd_sdma_is_occupied(struct amdgpu_device *adev,
+ void *mqd);
+int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
+ unsigned int utimeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
new file mode 100644
index 000000000000..1ef758ac5076
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2016-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/stacktrace.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/sched/mm.h>
+#include "amdgpu_amdkfd.h"
+#include "kfd_svm.h"
+
+static const struct dma_fence_ops amdkfd_fence_ops;
+static atomic_t fence_seq = ATOMIC_INIT(0);
+
+/* Eviction Fence
+ * Fence helper functions to deal with KFD memory eviction.
+ * Big Idea - Since KFD submissions are done by user queues, a BO cannot be
+ * evicted unless all the user queues for that process are evicted.
+ *
+ * All the BOs in a process share an eviction fence. When process X wants
+ * to map VRAM memory but TTM can't find enough space, TTM will attempt to
+ * evict BOs from its LRU list. TTM checks if the BO is valuable to evict
+ * by calling ttm_device_funcs->eviction_valuable().
+ *
+ * ttm_device_funcs->eviction_valuable() - will return false if the BO belongs
+ * to process X. Otherwise, it will return true to indicate the BO can be
+ * evicted by TTM.
+ *
+ * If ttm_device_funcs->eviction_valuable returns true, then TTM will continue
+ * the eviction process for that BO by calling ttm_bo_evict --> amdgpu_bo_move
+ * --> amdgpu_copy_buffer(). This sets up a job in the GPU scheduler.
+ *
+ * GPU Scheduler (amd_sched_main) - sets up a cb (fence_add_callback) to
+ * notify when the BO is free to move. fence_add_callback --> enable_signaling
+ * --> amdgpu_amdkfd_fence.enable_signaling
+ *
+ * amdgpu_amdkfd_fence.enable_signaling - Start a work item that will quiesce
+ * user queues and signal the fence. The work item will also start another
+ * delayed work item to restore the BOs.
+ */
+
+struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
+ struct mm_struct *mm,
+ struct svm_range_bo *svm_bo)
+{
+ struct amdgpu_amdkfd_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (fence == NULL)
+ return NULL;
+
+ /* This reference gets released in amdkfd_fence_release */
+ mmgrab(mm);
+ fence->mm = mm;
+ get_task_comm(fence->timeline_name, current);
+ spin_lock_init(&fence->lock);
+ fence->svm_bo = svm_bo;
+ dma_fence_init(&fence->base, &amdkfd_fence_ops, &fence->lock,
+ context, atomic_inc_return(&fence_seq));
+
+ return fence;
+}
+
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
+{
+ struct amdgpu_amdkfd_fence *fence;
+
+ if (!f)
+ return NULL;
+
+ fence = container_of(f, struct amdgpu_amdkfd_fence, base);
+ if (f->ops == &amdkfd_fence_ops)
+ return fence;
+
+ return NULL;
+}
+
+static const char *amdkfd_fence_get_driver_name(struct dma_fence *f)
+{
+ return "amdgpu_amdkfd_fence";
+}
+
+static const char *amdkfd_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f);
+
+ return fence->timeline_name;
+}
+
+/**
+ * amdkfd_fence_enable_signaling - This gets called when TTM wants to evict
+ * a KFD BO and schedules a job to move the BO.
+ * If the fence is already signaled, return true.
+ * If the fence is not signaled, schedule a KFD process eviction work item.
+ *
+ * @f: dma_fence
+ */
+static bool amdkfd_fence_enable_signaling(struct dma_fence *f)
+{
+ struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f);
+
+ if (!fence)
+ return false;
+
+ if (dma_fence_is_signaled(f))
+ return true;
+
+ if (!fence->svm_bo) {
+ if (!kgd2kfd_schedule_evict_and_restore_process(fence->mm, f))
+ return true;
+ } else {
+ if (!svm_range_schedule_evict_svm_bo(fence))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * amdkfd_fence_release - callback invoked when the fence can be freed
+ *
+ * @f: dma_fence
+ *
+ * This function is called when the reference count becomes zero.
+ * Drops the mm_struct reference and RCU schedules freeing up the fence.
+ */
+static void amdkfd_fence_release(struct dma_fence *f)
+{
+ struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f);
+
+ /* Unconditionally signal the fence. The process is getting
+ * terminated.
+ */
+ if (WARN_ON(!fence))
+ return; /* Not an amdgpu_amdkfd_fence */
+
+ mmdrop(fence->mm);
+ kfree_rcu(f, rcu);
+}
+
+/**
+ * amdkfd_fence_check_mm - Check whether to prevent eviction of @f by @mm
+ *
+ * @f: [IN] fence
+ * @mm: [IN] mm that needs to be verified
+ *
+ * Check if @mm is the same as that of the fence @f; if so, return TRUE, else
+ * return FALSE.
+ * For an SVM BO, which supports VRAM overcommitment, always return FALSE.
+ */
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
+{
+ struct amdgpu_amdkfd_fence *fence = to_amdgpu_amdkfd_fence(f);
+
+ if (!fence)
+ return false;
+ else if (fence->mm == mm && !fence->svm_bo)
+ return true;
+
+ return false;
+}
+
+static const struct dma_fence_ops amdkfd_fence_ops = {
+ .get_driver_name = amdkfd_fence_get_driver_name,
+ .get_timeline_name = amdkfd_fence_get_timeline_name,
+ .enable_signaling = amdkfd_fence_enable_signaling,
+ .release = amdkfd_fence_release,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
new file mode 100644
index 000000000000..89a45a9218f3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_amdkfd_gfx_v9.h"
+#include "amdgpu_amdkfd_aldebaran.h"
+#include "gc/gc_9_4_3_offset.h"
+#include "gc/gc_9_4_3_sh_mask.h"
+#include "athub/athub_1_8_0_offset.h"
+#include "athub/athub_1_8_0_sh_mask.h"
+#include "oss/osssys_4_4_2_offset.h"
+#include "oss/osssys_4_4_2_sh_mask.h"
+#include "v9_structs.h"
+#include "soc15.h"
+#include "sdma/sdma_4_4_2_offset.h"
+#include "sdma/sdma_4_4_2_sh_mask.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
+{
+ return (struct v9_sdma_mqd *)mqd;
+}
+
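+/* Compute the register offset of an SDMA RLC queue: resolve the engine's
+ * physical instance with GET_INST() and add the per-queue RLC stride.
+ */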
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
+ unsigned int engine_id,
+ unsigned int queue_id)
+{
+ uint32_t sdma_engine_reg_base =
+ SOC15_REG_OFFSET(SDMA0, GET_INST(SDMA0, engine_id),
+ regSDMA_RLC0_RB_CNTL) -
+ regSDMA_RLC0_RB_CNTL;
+ uint32_t retval = sdma_engine_reg_base +
+ queue_id * (regSDMA_RLC1_RB_CNTL - regSDMA_RLC0_RB_CNTL);
+
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
+ return retval;
+}
+
+static int kgd_gfx_v9_4_3_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
+{
+ struct v9_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ unsigned long end_jiffies;
+ uint32_t data;
+ uint64_t data64;
+ uint64_t __user *wptr64 = (uint64_t __user *)wptr;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL,
+ m->sdmax_rlcx_rb_cntl & (~SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK));
+
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_CONTEXT_STATUS);
+ if (data & SDMA_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL_OFFSET,
+ m->sdmax_rlcx_doorbell_offset);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA_RLC0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_MINOR_PTR_UPDATE, 1);
+ if (read_user_wptr(mm, wptr64, data64)) {
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR,
+ lower_32_bits(data64));
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR_HI,
+ upper_32_bits(data64));
+ } else {
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+ }
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_MINOR_PTR_UPDATE, 0);
+
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_BASE_HI,
+ m->sdmax_rlcx_rb_base_hi);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_ADDR_LO,
+ m->sdmax_rlcx_rb_rptr_addr_lo);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_ADDR_HI,
+ m->sdmax_rlcx_rb_rptr_addr_hi);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA_RLC0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, data);
+
+ return 0;
+}
+
+static int kgd_gfx_v9_4_3_hqd_sdma_dump(struct amdgpu_device *adev,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+6+7+12)
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32(addr); \
+ } while (0)
+
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = regSDMA_RLC0_RB_CNTL; reg <= regSDMA_RLC0_DOORBELL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = regSDMA_RLC0_STATUS; reg <= regSDMA_RLC0_CSA_ADDR_HI; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = regSDMA_RLC0_IB_SUB_REMAIN;
+ reg <= regSDMA_RLC0_MINOR_PTR_UPDATE; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = regSDMA_RLC0_MIDCMD_DATA0;
+ reg <= regSDMA_RLC0_MIDCMD_CNTL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static bool kgd_gfx_v9_4_3_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
+{
+ struct v9_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t sdma_rlc_rb_cntl;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL);
+
+ if (sdma_rlc_rb_cntl & SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK)
+ return true;
+
+ return false;
+}
+
+static int kgd_gfx_v9_4_3_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
+ unsigned int utimeout)
+{
+ struct v9_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t temp;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ temp = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL);
+ temp = temp & ~SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, temp);
+
+ while (true) {
+ temp = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_CONTEXT_STATUS);
+ if (temp & SDMA_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL) |
+ SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK);
+
+ m->sdmax_rlcx_rb_rptr =
+ RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr_hi =
+ RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_HI);
+
+ return 0;
+}
+
+static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev,
+ u32 pasid, unsigned int vmid, uint32_t xcc_inst)
+{
+ unsigned long timeout;
+ unsigned int reg;
+ unsigned int phy_inst = GET_INST(GC, xcc_inst);
+ /* Every two XCCs share one AID */
+ unsigned int aid = phy_inst / 2;
+
+ /*
+ * We have to assume that there is no outstanding mapping.
+ * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+ * a mapping is in progress or because a mapping finished
+ * and the SW cleared it.
+ * So the protocol is to always wait & clear.
+ */
+ uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+ ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+ WREG32(SOC15_REG_OFFSET(ATHUB, 0,
+ regATC_VMID0_PASID_MAPPING) + vmid, pasid_mapping);
+
+ timeout = jiffies + msecs_to_jiffies(10);
+ while (!(RREG32(SOC15_REG_OFFSET(ATHUB, 0,
+ regATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
+ (1U << vmid))) {
+ if (time_after(jiffies, timeout)) {
+ pr_err("Fail to program VMID-PASID mapping\n");
+ return -ETIME;
+ }
+ cpu_relax();
+ }
+
+ WREG32(SOC15_REG_OFFSET(ATHUB, 0,
+ regATC_VMID_PASID_MAPPING_UPDATE_STATUS),
+ 1U << vmid);
+
+ reg = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX));
+	/* Every 4 entries form a cycle: the 1st is the AID, the 2nd and 3rd are
+	 * XCDs, and the 4th is reserved. Therefore "aid * 4 + (phy_inst % 2) + 1"
+	 * programs the _LUT for the XCC and "aid * 4" for the AID that the XCC
+	 * connects to.
+ */
+ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX),
+ aid * 4 + (phy_inst % 2) + 1);
+ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid,
+ pasid_mapping);
+ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX),
+ aid * 4);
+ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT_MM) + vmid,
+ pasid_mapping);
+ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_LUT_INDEX), reg);
+
+ return 0;
+}
+
+static inline struct v9_mqd *get_mqd(void *mqd)
+{
+ return (struct v9_mqd *)mqd;
+}
+
+static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
+{
+ struct v9_mqd *m;
+ uint32_t *mqd_hqd;
+ uint32_t reg, hqd_base, hqd_end, data;
+
+ m = get_mqd(mqd);
+
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+
+ /* HQD registers extend to CP_HQD_AQL_DISPATCH_ID_HI */
+ mqd_hqd = &m->cp_mqd_base_addr_lo;
+ hqd_base = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_MQD_BASE_ADDR);
+ hqd_end = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_AQL_DISPATCH_ID_HI);
+
+ for (reg = hqd_base; reg <= hqd_end; reg++)
+ WREG32_XCC(reg, mqd_hqd[reg - hqd_base], inst);
+
+
+ /* Activate doorbell logic before triggering WPTR poll. */
+ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
+ CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_DOORBELL_CONTROL, data);
+
+ if (wptr) {
+ /* Don't read wptr with get_user because the user
+ * context may not be accessible (if this function
+ * runs in a work queue). Instead trigger a one-shot
+ * polling read from memory in the CP. This assumes
+ * that wptr is GPU-accessible in the queue's VMID via
+ * ATC or SVM. WPTR==RPTR before starting the poll so
+ * the CP starts fetching new commands from the right
+ * place.
+ *
+ * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
+ * tricky. Assume that the queue didn't overflow. The
+ * number of valid bits in the 32-bit RPTR depends on
+ * the queue size. The remaining bits are taken from
+ * the saved 64-bit WPTR. If the WPTR wrapped, add the
+ * queue size.
+ */
+ uint32_t queue_size =
+ 2 << REG_GET_FIELD(m->cp_hqd_pq_control,
+ CP_HQD_PQ_CONTROL, QUEUE_SIZE);
+ uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
+
+ if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
+ guessed_wptr += queue_size;
+ guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
+ guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
+
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_LO,
+ lower_32_bits(guessed_wptr));
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_HI,
+ upper_32_bits(guessed_wptr));
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR,
+ lower_32_bits((uintptr_t)wptr));
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
+ upper_32_bits((uintptr_t)wptr));
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_PQ_WPTR_POLL_CNTL1,
+ (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
+ }
+
+ /* Start the EOP fetcher */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_EOP_RPTR,
+ REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
+
+ data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_ACTIVE, data);
+
+ kgd_gfx_v9_release_queue(adev, inst);
+
+ return 0;
+}
+
+/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_gfx_v9_4_3_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
+
+ return data;
+}
+
+static int kgd_gfx_v9_4_3_validate_trap_override_request(
+ struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported)
+{
+ *trap_mask_supported &= KFD_DBG_TRAP_MASK_FP_INVALID |
+ KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
+ KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_FP_OVERFLOW |
+ KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
+ KFD_DBG_TRAP_MASK_FP_INEXACT |
+ KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
+ KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION |
+ KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START |
+ KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
+
+ if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR &&
+ trap_override != KFD_DBG_TRAP_OVERRIDE_REPLACE)
+ return -EPERM;
+
+ return 0;
+}
+
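+/* Translate a KFD_DBG_TRAP_MASK_* bitmask into the SPI_GDBG_PER_VMID_CNTL
+ * layout (EXCP_EN plus the TRAP_ON_START/TRAP_ON_END fields).
+ */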
+static uint32_t trap_mask_map_sw_to_hw(uint32_t mask)
+{
+ uint32_t trap_on_start = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START) ? 1 : 0;
+ uint32_t trap_on_end = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END) ? 1 : 0;
+ uint32_t excp_en = mask & (KFD_DBG_TRAP_MASK_FP_INVALID |
+ KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
+ KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_FP_OVERFLOW |
+ KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
+ KFD_DBG_TRAP_MASK_FP_INEXACT |
+ KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
+ KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION);
+ uint32_t ret;
+
+ ret = REG_SET_FIELD(0, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, excp_en);
+ ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START, trap_on_start);
+ ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END, trap_on_end);
+
+ return ret;
+}
+
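+/* Inverse of trap_mask_map_sw_to_hw(): recover the KFD_DBG_TRAP_MASK_* bits
+ * from a SPI_GDBG_PER_VMID_CNTL register value.
+ */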
+static uint32_t trap_mask_map_hw_to_sw(uint32_t mask)
+{
+ uint32_t ret = REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, EXCP_EN);
+
+ if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START))
+ ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START;
+
+ if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END))
+ ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
+
+ return ret;
+}
+
+/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_gfx_v9_4_3_set_wave_launch_trap_override(
+ struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_trap_cntl_prev)
+
+{
+ uint32_t data = 0;
+
+ *trap_mask_prev = trap_mask_map_hw_to_sw(kfd_dbg_trap_cntl_prev);
+
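+	/* Merge the requested bits with the previous mask in the SW encoding,
+	 * then translate the result back to the hardware register layout.
+	 */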
+ data = (trap_mask_bits & trap_mask_request) |
+ (*trap_mask_prev & ~trap_mask_request);
+ data = trap_mask_map_sw_to_hw(data);
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, trap_override);
+
+ return data;
+}
+
+#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H)
+static uint32_t kgd_gfx_v9_4_3_set_address_watch(
+ struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst)
+{
+ uint32_t watch_address_high;
+ uint32_t watch_address_low;
+ uint32_t watch_address_cntl;
+
+ watch_address_cntl = 0;
+ watch_address_low = lower_32_bits(watch_address);
+ watch_address_high = upper_32_bits(watch_address) & 0xffff;
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MODE,
+ watch_mode);
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MASK,
+ watch_address_mask >> 7);
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VALID,
+ 1);
+
+ WREG32_XCC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+ regTCP_WATCH0_ADDR_H) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_high, inst);
+
+ WREG32_XCC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+ regTCP_WATCH0_ADDR_L) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_low, inst);
+
+ return watch_address_cntl;
+}
+
+static uint32_t kgd_gfx_v9_4_3_clear_address_watch(struct amdgpu_device *adev,
+ uint32_t watch_id)
+{
+ return 0;
+}
+
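+/* Return the queue's programmed doorbell offset if its context is currently
+ * selected by the SDMA engine, otherwise 0.
+ */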
+static uint32_t kgd_gfx_v9_4_3_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ uint32_t reg_offset = get_sdma_rlc_reg_offset(adev, engine, queue);
+ uint32_t status = RREG32(regSDMA_RLC0_CONTEXT_STATUS + reg_offset);
+ uint32_t doorbell_off = RREG32(regSDMA_RLC0_DOORBELL_OFFSET + reg_offset);
+ bool is_active = !!REG_GET_FIELD(status, SDMA_RLC0_CONTEXT_STATUS, SELECTED);
+
+ return is_active ? doorbell_off >> 2 : 0;
+}
+
+const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = {
+ .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_gfx_v9_4_3_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_gfx_v9_init_interrupts,
+ .hqd_load = kgd_gfx_v9_4_3_hqd_load,
+ .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
+ .hqd_sdma_load = kgd_gfx_v9_4_3_hqd_sdma_load,
+ .hqd_dump = kgd_gfx_v9_hqd_dump,
+ .hqd_sdma_dump = kgd_gfx_v9_4_3_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_gfx_v9_4_3_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_gfx_v9_hqd_destroy,
+ .hqd_sdma_destroy = kgd_gfx_v9_4_3_hqd_sdma_destroy,
+ .wave_control_execute = kgd_gfx_v9_wave_control_execute,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
+ .set_vm_context_page_table_base =
+ kgd_gfx_v9_set_vm_context_page_table_base,
+ .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
+ .program_trap_handler_settings =
+ kgd_gfx_v9_program_trap_handler_settings,
+ .build_dequeue_wait_counts_packet_info =
+ kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
+ .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
+ .enable_debug_trap = kgd_aldebaran_enable_debug_trap,
+ .disable_debug_trap = kgd_gfx_v9_4_3_disable_debug_trap,
+ .validate_trap_override_request =
+ kgd_gfx_v9_4_3_validate_trap_override_request,
+ .set_wave_launch_trap_override =
+ kgd_gfx_v9_4_3_set_wave_launch_trap_override,
+ .set_wave_launch_mode = kgd_aldebaran_set_wave_launch_mode,
+ .set_address_watch = kgd_gfx_v9_4_3_set_address_watch,
+ .clear_address_watch = kgd_gfx_v9_4_3_clear_address_watch,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_4_3_hqd_sdma_get_doorbell
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
new file mode 100644
index 000000000000..0239114fb6c4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -0,0 +1,1123 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_amdkfd_gfx_v10.h"
+#include "gc/gc_10_1_0_offset.h"
+#include "gc/gc_10_1_0_sh_mask.h"
+#include "athub/athub_2_0_0_offset.h"
+#include "athub/athub_2_0_0_sh_mask.h"
+#include "oss/osssys_5_0_0_offset.h"
+#include "oss/osssys_5_0_0_sh_mask.h"
+#include "soc15_common.h"
+#include "v10_structs.h"
+#include "nv.h"
+#include "nvd.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+enum hqd_dequeue_request_type {
+ NO_ACTION = 0,
+ DRAIN_PIPE,
+ RESET_WAVES,
+ SAVE_WAVES
+};
+
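+/* Select the (mec, pipe, queue, vmid) aperture via GRBM while holding
+ * srbm_mutex so that per-queue register accesses reach the intended instance.
+ */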
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
+ uint32_t queue, uint32_t vmid)
+{
+ mutex_lock(&adev->srbm_mutex);
+ nv_grbm_select(adev, mec, pipe, queue, vmid);
+}
+
+static void unlock_srbm(struct amdgpu_device *adev)
+{
+ nv_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ lock_srbm(adev, mec, pipe, queue_id, 0);
+}
+
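+/* Single-bit mask for (pipe, queue) in the queue bitmask written to
+ * CP_PQ_WPTR_POLL_CNTL1.
+ */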
+static uint64_t get_queue_mask(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
+ queue_id;
+
+ return 1ull << bit;
+}
+
+static void release_queue(struct amdgpu_device *adev)
+{
+ unlock_srbm(adev);
+}
+
+static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
+ uint32_t sh_mem_config,
+ uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit,
+ uint32_t sh_mem_bases, uint32_t inst)
+{
+ lock_srbm(adev, 0, 0, 0, vmid);
+
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
+ WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
+	/* APE1 no longer exists on GFX9 and later ASICs */
+
+ unlock_srbm(adev);
+}
+
+static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
+ unsigned int vmid, uint32_t inst)
+{
+ /*
+ * We have to assume that there is no outstanding mapping.
+ * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+ * a mapping is in progress or because a mapping finished
+ * and the SW cleared it.
+ * So the protocol is to always wait & clear.
+ */
+ uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+ ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+ pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping);
+
+ pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid);
+ WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
+ pasid_mapping);
+
+#if 0
+ /* TODO: uncomment this code when the hardware support is ready. */
+ while (!(RREG32(SOC15_REG_OFFSET(
+ ATHUB, 0,
+ mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
+ (1U << vmid)))
+ cpu_relax();
+
+ pr_debug("ATHUB mapping update finished\n");
+ WREG32(SOC15_REG_OFFSET(ATHUB, 0,
+ mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
+ 1U << vmid);
+#endif
+
+	/* Map the vmid to pasid also for the IH block */
+	pr_debug("update mapping for IH block and mmhub\n");
+ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
+ pasid_mapping);
+
+ return 0;
+}
+
+/* TODO - RING0 form of field is obsolete, seems to date back to SI
+ * but still works
+ */
+
+static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t inst)
+{
+ uint32_t mec;
+ uint32_t pipe;
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ lock_srbm(adev, mec, pipe, 0, 0);
+
+ WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
+ CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+ CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
+
+ unlock_srbm(adev);
+
+ return 0;
+}
+
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
+ unsigned int engine_id,
+ unsigned int queue_id)
+{
+ uint32_t sdma_engine_reg_base[2] = {
+ SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
+ /* On gfx10, mmSDMA1_xxx registers are defined NOT based
+ * on SDMA1 base address (dw 0x1860) but based on SDMA0
+ * base address (dw 0x1260). Therefore use mmSDMA0_RLC0_RB_CNTL
+ * instead of mmSDMA1_RLC0_RB_CNTL for the base address calc
+ * below
+ */
+ SOC15_REG_OFFSET(SDMA1, 0,
+ mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL
+ };
+
+ uint32_t retval = sdma_engine_reg_base[engine_id]
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
+
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, retval);
+
+ return retval;
+}
+
+#if 0
+static uint32_t get_watch_base_addr(struct amdgpu_device *adev)
+{
+ uint32_t retval = SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) -
+ mmTCP_WATCH0_ADDR_H;
+
+ pr_debug("kfd: reg watch base address: 0x%x\n", retval);
+
+ return retval;
+}
+#endif
+
+static inline struct v10_compute_mqd *get_mqd(void *mqd)
+{
+ return (struct v10_compute_mqd *)mqd;
+}
+
+static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
+{
+ return (struct v10_sdma_mqd *)mqd;
+}
+
+static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
+{
+ struct v10_compute_mqd *m;
+ uint32_t *mqd_hqd;
+ uint32_t reg, hqd_base, data;
+
+ m = get_mqd(mqd);
+
+ pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
+
+ /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
+ mqd_hqd = &m->cp_mqd_base_addr_lo;
+ hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
+
+ for (reg = hqd_base;
+ reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
+ WREG32_SOC15_IP(GC, reg, mqd_hqd[reg - hqd_base]);
+
+
+ /* Activate doorbell logic before triggering WPTR poll. */
+ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
+ CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data);
+
+ if (wptr) {
+ /* Don't read wptr with get_user because the user
+ * context may not be accessible (if this function
+ * runs in a work queue). Instead trigger a one-shot
+ * polling read from memory in the CP. This assumes
+ * that wptr is GPU-accessible in the queue's VMID via
+ * ATC or SVM. WPTR==RPTR before starting the poll so
+ * the CP starts fetching new commands from the right
+ * place.
+ *
+ * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
+ * tricky. Assume that the queue didn't overflow. The
+ * number of valid bits in the 32-bit RPTR depends on
+ * the queue size. The remaining bits are taken from
+ * the saved 64-bit WPTR. If the WPTR wrapped, add the
+ * queue size.
+ */
+ uint32_t queue_size =
+ 2 << REG_GET_FIELD(m->cp_hqd_pq_control,
+ CP_HQD_PQ_CONTROL, QUEUE_SIZE);
+ uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
+
+ if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
+ guessed_wptr += queue_size;
+ guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
+ guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
+
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
+ lower_32_bits(guessed_wptr));
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
+ upper_32_bits(guessed_wptr));
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
+ lower_32_bits((uint64_t)wptr));
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
+ upper_32_bits((uint64_t)wptr));
+ pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
+ (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
+ WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1,
+ (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
+ }
+
+ /* Start the EOP fetcher */
+ WREG32_SOC15(GC, 0, mmCP_HQD_EOP_RPTR,
+ REG_SET_FIELD(m->cp_hqd_eop_rptr,
+ CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
+
+ data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+ WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);
+
+ release_queue(adev);
+
+ return 0;
+}
+
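+/* Map the HIQ by submitting a MAP_QUEUES packet on the KIQ ring rather than
+ * programming the HQD registers directly.
+ */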
+static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off, uint32_t inst)
+{
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
+ struct v10_compute_mqd *m;
+ uint32_t mec, pipe;
+ int r;
+
+ m = get_mqd(mqd);
+
+ acquire_queue(adev, pipe_id, queue_id);
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+
+ spin_lock(&adev->gfx.kiq[0].ring_lock);
+ r = amdgpu_ring_alloc(kiq_ring, 7);
+ if (r) {
+ pr_err("Failed to alloc KIQ (%d).\n", r);
+ goto out_unlock;
+ }
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(queue_id) |
+ PACKET3_MAP_QUEUES_PIPE(pipe) |
+ PACKET3_MAP_QUEUES_ME((mec - 1)) |
+ PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
+ amdgpu_ring_commit(kiq_ring);
+
+out_unlock:
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
+ release_queue(adev);
+
+ return r;
+}
+
+static int kgd_hqd_dump(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
+{
+ uint32_t i = 0, reg;
+#define HQD_N_REGS 56
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
+ } while (0)
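+	/* Each dump entry is a {byte address, value} pair; the dword
+	 * register offset is shifted left by 2 to convert it to bytes.
+	 */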
+
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ acquire_queue(adev, pipe_id, queue_id);
+
+ for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
+ reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
+ DUMP_REG(reg);
+
+ release_queue(adev);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
+{
+ struct v10_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ unsigned long end_jiffies;
+ uint32_t data;
+ uint64_t data64;
+ uint64_t __user *wptr64 = (uint64_t __user *)wptr;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
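+	/* Disable the ring buffer, then wait below for the SDMA RLC context
+	 * to go idle before reprogramming the queue.
+	 */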
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
+
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ m->sdmax_rlcx_doorbell_offset);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ if (read_user_wptr(mm, wptr64, data64)) {
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+ lower_32_bits(data64));
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+ upper_32_bits(data64));
+ } else {
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+ }
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
+ m->sdmax_rlcx_rb_base_hi);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ m->sdmax_rlcx_rb_rptr_addr_lo);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ m->sdmax_rlcx_rb_rptr_addr_hi);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+6+7+10)
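+/* The four terms above match the four register ranges dumped below. */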
+
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
+ reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
+ reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t inst)
+{
+ uint32_t act;
+ bool retval = false;
+ uint32_t low, high;
+
+ acquire_queue(adev, pipe_id, queue_id);
+ act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
+ if (act) {
+ low = lower_32_bits(queue_address >> 8);
+ high = upper_32_bits(queue_address >> 8);
+
+ if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) &&
+ high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
+ retval = true;
+ }
+ release_queue(adev);
+ return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
+{
+ struct v10_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t sdma_rlc_rb_cntl;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+
+ if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+ return true;
+
+ return false;
+}
+
+static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
+ enum kfd_preempt_type reset_type,
+ unsigned int utimeout, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t inst)
+{
+ enum hqd_dequeue_request_type type;
+ unsigned long end_jiffies;
+ uint32_t temp;
+ struct v10_compute_mqd *m = get_mqd(mqd);
+
+ if (amdgpu_in_reset(adev))
+ return -EIO;
+
+#if 0
+ unsigned long flags;
+ int retry;
+#endif
+
+ acquire_queue(adev, pipe_id, queue_id);
+
+ if (m->cp_hqd_vmid == 0)
+ WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
+
+ switch (reset_type) {
+ case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
+ type = DRAIN_PIPE;
+ break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
+ type = RESET_WAVES;
+ break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
+ type = SAVE_WAVES;
+ break;
+ default:
+ type = DRAIN_PIPE;
+ break;
+ }
+
+#if 0 /* Is this still needed? */
+ /* Workaround: If IQ timer is active and the wait time is close to or
+ * equal to 0, dequeueing is not safe. Wait until either the wait time
+ * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
+ * cleared before continuing. Also, ensure wait times are set to at
+ * least 0x3.
+ */
+ local_irq_save(flags);
+ preempt_disable();
+ retry = 5000; /* wait for 500 usecs at maximum */
+ while (true) {
+ temp = RREG32(mmCP_HQD_IQ_TIMER);
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
+ pr_debug("HW is processing IQ\n");
+ goto loop;
+ }
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
+ == 3) /* SEM-rearm is safe */
+ break;
+ /* Wait time 3 is safe for CP, but our MMIO read/write
+ * time is close to 1 microsecond, so check for 10 to
+ * leave more buffer room
+ */
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
+ >= 10)
+ break;
+ pr_debug("IQ timer is active\n");
+ } else
+ break;
+loop:
+ if (!retry) {
+ pr_err("CP HQD IQ timer status time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ retry = 1000;
+ while (true) {
+ temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+ if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
+ break;
+ pr_debug("Dequeue request is pending\n");
+
+ if (!retry) {
+ pr_err("CP HQD dequeue request time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ local_irq_restore(flags);
+ preempt_enable();
+#endif
+
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, type);
+
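+	/* utimeout is in milliseconds; convert it to a jiffies deadline. */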
+ end_jiffies = (utimeout * HZ / 1000) + jiffies;
+ while (true) {
+ temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("cp queue preemption time out.\n");
+ release_queue(adev);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ release_queue(adev);
+ return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
+ unsigned int utimeout)
+{
+ struct v10_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t temp;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+ temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
+
+ while (true) {
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
+ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
+
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr_hi =
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
+
+ return 0;
+}
+
+static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
+ uint8_t vmid, uint16_t *p_pasid)
+{
+ uint32_t value;
+
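+	/* The per-VMID PASID mapping registers are contiguous, so vmid can
+	 * be used as an offset from the VMID0 register.
+	 */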
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
+}
+
+static int kgd_wave_control_execute(struct amdgpu_device *adev,
+ uint32_t gfx_index_val,
+ uint32_t sq_cmd, uint32_t inst)
+{
+ uint32_t data = 0;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
+ WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd);
+
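+	/* Restore GRBM_GFX_INDEX to broadcast mode so subsequent register
+	 * writes reach all SEs, SAs and instances again.
+	 */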
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ INSTANCE_BROADCAST_WRITES, 1);
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SA_BROADCAST_WRITES, 1);
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SE_BROADCAST_WRITES, 1);
+
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+static void set_vm_context_page_table_base(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t page_table_base)
+{
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+ pr_err("trying to set page table base for wrong VMID %u\n",
+ vmid);
+ return;
+ }
+
+ /* SDMA is on gfxhub as well for Navi1* series */
+ adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
+}
+
+/*
+ * GFX10 helper for wave launch stall requirements on debug trap setting.
+ *
+ * vmid:
+ * Target VMID to stall/unstall.
+ *
+ * stall:
+ * 0-unstall wave launch (enable), 1-stall wave launch (disable).
+ * After wavefront launch has been stalled, allocated waves must drain from
+ * SPI in order for debug trap settings to take effect on those waves.
+ * This is roughly a ~3500 clock cycle wait on SPI where a read on
+ * SPI_GDBG_WAVE_CNTL translates to ~32 clock cycles.
+ * KGD_GFX_V10_WAVE_LAUNCH_SPI_DRAIN_LATENCY indicates the number of reads required.
+ *
+ * NOTE: We can afford to clear the entire STALL_VMID field on unstall
+ * because current GFX10 chips cannot support multi-process debugging due to
+ * trap configuration and masking being limited to global scope. Always
+ * assume single process conditions.
+ *
+ */
+
+#define KGD_GFX_V10_WAVE_LAUNCH_SPI_DRAIN_LATENCY 110
+static void kgd_gfx_v10_set_wave_launch_stall(struct amdgpu_device *adev, uint32_t vmid, bool stall)
+{
+ uint32_t data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
+ int i;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_VMID,
+ stall ? 1 << vmid : 0);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
+
+ if (!stall)
+ return;
+
+ for (i = 0; i < KGD_GFX_V10_WAVE_LAUNCH_SPI_DRAIN_LATENCY; i++)
+ RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
+}
+
+uint32_t kgd_gfx_v10_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid)
+{
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, true);
+
+	/* Assume GFXOFF is disabled for the debug session if RLC restore is not supported. */
+ if (restore_dbg_registers) {
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG,
+ VMID_SEL, 1 << vmid);
+ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_CONFIG,
+ TRAP_EN, 1);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_CONFIG), data);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA0), 0);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_DATA1), 0);
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+ }
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+uint32_t kgd_gfx_v10_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid)
+{
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, true);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+int kgd_gfx_v10_validate_trap_override_request(struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported)
+{
+ *trap_mask_supported &= KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH;
+
+ /* The SPI_GDBG_TRAP_MASK register is global and affects all
+ * processes. Only allow OR-ing the address-watch bit, since
+ * this only affects processes under the debugger. Other bits
+ * should stay 0 to avoid the debugger interfering with other
+ * processes.
+ */
+ if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR)
+ return -EINVAL;
+
+ return 0;
+}
+
+uint32_t kgd_gfx_v10_set_wave_launch_trap_override(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_trap_cntl_prev)
+{
+ uint32_t data, wave_cntl_prev;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ wave_cntl_prev = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, true);
+
+ data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK));
+ *trap_mask_prev = REG_GET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN);
+
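+	/* Take the requested bits from trap_mask_bits and keep the previous
+	 * values of all other exception-enable bits.
+	 */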
+ trap_mask_bits = (trap_mask_bits & trap_mask_request) |
+ (*trap_mask_prev & ~trap_mask_request);
+
+ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN, trap_mask_bits);
+ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, REPLACE, trap_override);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);
+
+ /* We need to preserve wave launch mode stall settings. */
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), wave_cntl_prev);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+uint32_t kgd_gfx_v10_set_wave_launch_mode(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+ bool is_mode_set = !!wave_launch_mode;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, true);
+
+ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
+ VMID_MASK, is_mode_set ? 1 << vmid : 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
+ MODE, is_mode_set ? wave_launch_mode : 0);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data);
+
+ kgd_gfx_v10_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+#define TCP_WATCH_STRIDE (mmTCP_WATCH1_ADDR_H - mmTCP_WATCH0_ADDR_H)
+#define SQ_WATCH_STRIDE (mmSQ_WATCH1_ADDR_H - mmSQ_WATCH0_ADDR_H)
+uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst)
+{
+ /* SQ_WATCH?_ADDR_* and TCP_WATCH?_ADDR_* are programmed with the
+ * same values.
+ */
+ uint32_t watch_address_high;
+ uint32_t watch_address_low;
+ uint32_t tcp_watch_address_cntl;
+ uint32_t sq_watch_address_cntl;
+
+ watch_address_low = lower_32_bits(watch_address);
+ watch_address_high = upper_32_bits(watch_address) & 0xffff;
+
+ tcp_watch_address_cntl = 0;
+ tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VMID,
+ debug_vmid);
+ tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MODE,
+ watch_mode);
+ tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MASK,
+ watch_address_mask >> 7);
+
+ sq_watch_address_cntl = 0;
+ sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl,
+ SQ_WATCH0_CNTL,
+ VMID,
+ debug_vmid);
+ sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl,
+ SQ_WATCH0_CNTL,
+ MODE,
+ watch_mode);
+ sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl,
+ SQ_WATCH0_CNTL,
+ MASK,
+ watch_address_mask >> 6);
+
+ /* Turning off this watch point until we set all the registers */
+ tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VALID,
+ 0);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ tcp_watch_address_cntl);
+
+ sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl,
+ SQ_WATCH0_CNTL,
+ VALID,
+ 0);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_CNTL) +
+ (watch_id * SQ_WATCH_STRIDE)),
+ sq_watch_address_cntl);
+
+ /* Program {TCP,SQ}_WATCH?_ADDR* */
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_high);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_L) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_low);
+
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_ADDR_H) +
+ (watch_id * SQ_WATCH_STRIDE)),
+ watch_address_high);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_ADDR_L) +
+ (watch_id * SQ_WATCH_STRIDE)),
+ watch_address_low);
+
+ /* Enable the watch point */
+ tcp_watch_address_cntl = REG_SET_FIELD(tcp_watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VALID,
+ 1);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ tcp_watch_address_cntl);
+
+ sq_watch_address_cntl = REG_SET_FIELD(sq_watch_address_cntl,
+ SQ_WATCH0_CNTL,
+ VALID,
+ 1);
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_CNTL) +
+ (watch_id * SQ_WATCH_STRIDE)),
+ sq_watch_address_cntl);
+
+ return 0;
+}
+
+uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev,
+ uint32_t watch_id)
+{
+ uint32_t watch_address_cntl;
+
+ watch_address_cntl = 0;
+
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_cntl);
+
+ WREG32((SOC15_REG_OFFSET(GC, 0, mmSQ_WATCH0_CNTL) +
+ (watch_id * SQ_WATCH_STRIDE)),
+ watch_address_cntl);
+
+ return 0;
+}
+#undef TCP_WATCH_STRIDE
+#undef SQ_WATCH_STRIDE
+
+
+/* kgd_gfx_v10_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME2 value.
+ * The CP_IQ_WAIT_TIME1/2 registers hold the following wait counts:
+ * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads.
+ * atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads.
+ * wrm_offload_wait_time -- Wait Count for WAIT_REG_MEM Offloads.
+ * gws_wait_time -- Wait Count for Global Wave Syncs.
+ * que_sleep_wait_time -- Wait Count for Queue Sleep.
+ * sch_wave_wait_time -- Wait Count for Scheduling Wave Message.
+ * sem_rearm_wait_time -- Wait Count for Semaphore re-arm.
+ * deq_retry_wait_time -- Wait Count for Dequeue Retry.
+ */
+void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev,
+ uint32_t *wait_times,
+ uint32_t inst)
+
+{
+ *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
+}
+
+void kgd_gfx_v10_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
+ uint32_t wait_times,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
+ uint32_t *reg_offset,
+ uint32_t *reg_data)
+{
+ *reg_data = wait_times;
+
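+	/* Start from the current wait_times value and only override the
+	 * fields the caller asked for; return the register offset so the
+	 * caller can emit the write in a packet.
+	 */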
+ if (sch_wave)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ SCH_WAVE,
+ sch_wave);
+ if (que_sleep)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ QUE_SLEEP,
+ que_sleep);
+
+ *reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
+}
+
+static void program_trap_handler_settings(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
+ uint32_t inst)
+{
+ lock_srbm(adev, 0, 0, 0, vmid);
+
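+	/* TBA and TMA are programmed as 256-byte-aligned addresses, hence
+	 * the >> 8 shifts below.
+	 */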
+ /*
+ * Program TBA registers
+ */
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
+ lower_32_bits(tba_addr >> 8));
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
+ upper_32_bits(tba_addr >> 8) |
+ (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));
+
+ /*
+ * Program TMA registers
+ */
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
+ lower_32_bits(tma_addr >> 8));
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
+ upper_32_bits(tma_addr >> 8));
+
+ unlock_srbm(adev);
+}
+
+uint64_t kgd_gfx_v10_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst)
+{
+ return 0;
+}
+
+uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst, unsigned int utimeout)
+{
+ return 0;
+}
+
+uint32_t kgd_gfx_v10_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
+const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hiq_mqd_load = kgd_hiq_mqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .wave_control_execute = kgd_wave_control_execute,
+ .get_atc_vmid_pasid_mapping_info =
+ get_atc_vmid_pasid_mapping_info,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .enable_debug_trap = kgd_gfx_v10_enable_debug_trap,
+ .disable_debug_trap = kgd_gfx_v10_disable_debug_trap,
+ .validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request,
+ .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override,
+ .set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode,
+ .set_address_watch = kgd_gfx_v10_set_address_watch,
+ .clear_address_watch = kgd_gfx_v10_clear_address_watch,
+ .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v10_build_dequeue_wait_counts_packet_info,
+ .program_trap_handler_settings = program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v10_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v10_hqd_sdma_get_doorbell
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
new file mode 100644
index 000000000000..a4c607c88178
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+uint32_t kgd_gfx_v10_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid);
+uint32_t kgd_gfx_v10_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid);
+int kgd_gfx_v10_validate_trap_override_request(struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported);
+uint32_t kgd_gfx_v10_set_wave_launch_trap_override(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_trap_cntl_prev);
+uint32_t kgd_gfx_v10_set_wave_launch_mode(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid);
+uint32_t kgd_gfx_v10_set_address_watch(struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst);
+uint32_t kgd_gfx_v10_clear_address_watch(struct amdgpu_device *adev,
+ uint32_t watch_id);
+void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev,
+ uint32_t *wait_times,
+ uint32_t inst);
+void kgd_gfx_v10_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
+ uint32_t wait_times,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
+ uint32_t *reg_offset,
+ uint32_t *reg_data);
+uint64_t kgd_gfx_v10_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst);
+uint64_t kgd_gfx_v10_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst,
+ unsigned int utimeout);
+uint32_t kgd_gfx_v10_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
new file mode 100644
index 000000000000..f2278a0937ff
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
@@ -0,0 +1,687 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/mmu_context.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_amdkfd_gfx_v10.h"
+#include "gc/gc_10_3_0_offset.h"
+#include "gc/gc_10_3_0_sh_mask.h"
+#include "oss/osssys_5_0_0_offset.h"
+#include "oss/osssys_5_0_0_sh_mask.h"
+#include "athub/athub_2_1_0_offset.h"
+#include "athub/athub_2_1_0_sh_mask.h"
+#include "soc15_common.h"
+#include "v10_structs.h"
+#include "nv.h"
+#include "nvd.h"
+
+enum hqd_dequeue_request_type {
+ NO_ACTION = 0,
+ DRAIN_PIPE,
+ RESET_WAVES,
+ SAVE_WAVES
+};
+
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
+ uint32_t queue, uint32_t vmid)
+{
+ mutex_lock(&adev->srbm_mutex);
+ nv_grbm_select(adev, mec, pipe, queue, vmid);
+}
+
+static void unlock_srbm(struct amdgpu_device *adev)
+{
+ nv_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ lock_srbm(adev, mec, pipe, queue_id, 0);
+}
+
+static uint64_t get_queue_mask(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
+ queue_id;
+
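+	/* Each pipe/queue pair owns one bit of the CP's WPTR poll mask
+	 * (written to CP_PQ_WPTR_POLL_CNTL1 in hqd_load).
+	 */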
+ return 1ull << bit;
+}
+
+static void release_queue(struct amdgpu_device *adev)
+{
+ unlock_srbm(adev);
+}
+
+static void program_sh_mem_settings_v10_3(struct amdgpu_device *adev, uint32_t vmid,
+ uint32_t sh_mem_config,
+ uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit,
+ uint32_t sh_mem_bases, uint32_t inst)
+{
+ lock_srbm(adev, 0, 0, 0, vmid);
+
+ WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
+ WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
+	/* APE1 no longer exists on GFX9 and later ASICs */
+
+ unlock_srbm(adev);
+}
+
+/* ATC is defeatured on Sienna_Cichlid */
+static int set_pasid_vmid_mapping_v10_3(struct amdgpu_device *adev, unsigned int pasid,
+ unsigned int vmid, uint32_t inst)
+{
+ uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT;
+
+ /* Mapping vmid to pasid also for IH block */
+ pr_debug("mapping vmid %d -> pasid %d in IH block for GFX client\n",
+ vmid, pasid);
+ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid, value);
+
+ return 0;
+}
+
+static int init_interrupts_v10_3(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t inst)
+{
+ uint32_t mec;
+ uint32_t pipe;
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ lock_srbm(adev, mec, pipe, 0, 0);
+
+ WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
+ CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+ CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
+
+ unlock_srbm(adev);
+
+ return 0;
+}
+
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
+ unsigned int engine_id,
+ unsigned int queue_id)
+{
+ uint32_t sdma_engine_reg_base = 0;
+ uint32_t sdma_rlc_reg_offset;
+
+ switch (engine_id) {
+ default:
+ dev_warn(adev->dev,
+ "Invalid sdma engine id (%d), using engine id 0\n",
+ engine_id);
+ fallthrough;
+ case 0:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ case 1:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ case 2:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA2_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ case 3:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA3_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ }
+
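+	/* Queues within one SDMA engine sit at a fixed per-queue register
+	 * stride from the engine base.
+	 */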
+ sdma_rlc_reg_offset = sdma_engine_reg_base
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
+
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, sdma_rlc_reg_offset);
+
+ return sdma_rlc_reg_offset;
+}
+
+static inline struct v10_compute_mqd *get_mqd(void *mqd)
+{
+ return (struct v10_compute_mqd *)mqd;
+}
+
+static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
+{
+ return (struct v10_sdma_mqd *)mqd;
+}
+
+static int hqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
+{
+ struct v10_compute_mqd *m;
+ uint32_t *mqd_hqd;
+ uint32_t reg, hqd_base, data;
+
+ m = get_mqd(mqd);
+
+ pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
+
+	/* HIQ is set during driver init period with vmid set to 0 */
+ if (m->cp_hqd_vmid == 0) {
+ uint32_t value, mec, pipe;
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+ value = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS);
+ value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
+ ((mec << 5) | (pipe << 3) | queue_id | 0x80));
+ WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS, value);
+ }
+
+ /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
+ mqd_hqd = &m->cp_mqd_base_addr_lo;
+ hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
+
+ for (reg = hqd_base;
+ reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
+ WREG32_SOC15_IP(GC, reg, mqd_hqd[reg - hqd_base]);
+
+
+ /* Activate doorbell logic before triggering WPTR poll. */
+ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
+ CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data);
+
+ if (wptr) {
+ /* Don't read wptr with get_user because the user
+ * context may not be accessible (if this function
+ * runs in a work queue). Instead trigger a one-shot
+ * polling read from memory in the CP. This assumes
+ * that wptr is GPU-accessible in the queue's VMID via
+ * ATC or SVM. WPTR==RPTR before starting the poll so
+ * the CP starts fetching new commands from the right
+ * place.
+ *
+ * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
+ * tricky. Assume that the queue didn't overflow. The
+ * number of valid bits in the 32-bit RPTR depends on
+ * the queue size. The remaining bits are taken from
+ * the saved 64-bit WPTR. If the WPTR wrapped, add the
+ * queue size.
+ */
+ uint32_t queue_size =
+ 2 << REG_GET_FIELD(m->cp_hqd_pq_control,
+ CP_HQD_PQ_CONTROL, QUEUE_SIZE);
+ uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
+
+ if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
+ guessed_wptr += queue_size;
+ guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
+ guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
+
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
+ lower_32_bits(guessed_wptr));
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
+ upper_32_bits(guessed_wptr));
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
+ lower_32_bits((uint64_t)wptr));
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
+ upper_32_bits((uint64_t)wptr));
+ pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
+ (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
+ WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1,
+ (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
+ }
+
+ /* Start the EOP fetcher */
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
+ REG_SET_FIELD(m->cp_hqd_eop_rptr,
+ CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
+
+ data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+ WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);
+
+ release_queue(adev);
+
+ return 0;
+}
+
+static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off, uint32_t inst)
+{
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
+ struct v10_compute_mqd *m;
+ uint32_t mec, pipe;
+ int r;
+
+ m = get_mqd(mqd);
+
+ acquire_queue(adev, pipe_id, queue_id);
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+
+ spin_lock(&adev->gfx.kiq[0].ring_lock);
+ r = amdgpu_ring_alloc(kiq_ring, 7);
+ if (r) {
+ pr_err("Failed to alloc KIQ (%d).\n", r);
+ goto out_unlock;
+ }
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(queue_id) |
+ PACKET3_MAP_QUEUES_PIPE(pipe) |
+ PACKET3_MAP_QUEUES_ME((mec - 1)) |
+			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
+ amdgpu_ring_commit(kiq_ring);
+
+out_unlock:
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
+ release_queue(adev);
+
+ return r;
+}
+
+static int hqd_dump_v10_3(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
+{
+ uint32_t i = 0, reg;
+#define HQD_N_REGS 56
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32_SOC15_IP(GC, addr); \
+ } while (0)
+
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ acquire_queue(adev, pipe_id, queue_id);
+
+ for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
+ reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
+ DUMP_REG(reg);
+
+ release_queue(adev);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static int hqd_sdma_load_v10_3(struct amdgpu_device *adev, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
+{
+ struct v10_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ unsigned long end_jiffies;
+ uint32_t data;
+ uint64_t data64;
+ uint64_t __user *wptr64 = (uint64_t __user *)wptr;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
+
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ m->sdmax_rlcx_doorbell_offset);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ if (read_user_wptr(mm, wptr64, data64)) {
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+ lower_32_bits(data64));
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+ upper_32_bits(data64));
+ } else {
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+ }
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
+ m->sdmax_rlcx_rb_base_hi);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ m->sdmax_rlcx_rb_rptr_addr_lo);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ m->sdmax_rlcx_rb_rptr_addr_hi);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
+
+ return 0;
+}
+
+static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+6+7+12)
+
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
+ reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
+ reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static bool hqd_is_occupied_v10_3(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t inst)
+{
+ uint32_t act;
+ bool retval = false;
+ uint32_t low, high;
+
+ acquire_queue(adev, pipe_id, queue_id);
+ act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
+ if (act) {
+ low = lower_32_bits(queue_address >> 8);
+ high = upper_32_bits(queue_address >> 8);
+
+ if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) &&
+ high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
+ retval = true;
+ }
+ release_queue(adev);
+ return retval;
+}
+
+static bool hqd_sdma_is_occupied_v10_3(struct amdgpu_device *adev,
+ void *mqd)
+{
+ struct v10_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t sdma_rlc_rb_cntl;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+
+ if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+ return true;
+
+ return false;
+}
+
+static int hqd_destroy_v10_3(struct amdgpu_device *adev, void *mqd,
+ enum kfd_preempt_type reset_type,
+ unsigned int utimeout, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t inst)
+{
+ enum hqd_dequeue_request_type type;
+ unsigned long end_jiffies;
+ uint32_t temp;
+ struct v10_compute_mqd *m = get_mqd(mqd);
+
+ acquire_queue(adev, pipe_id, queue_id);
+
+ if (m->cp_hqd_vmid == 0)
+ WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
+
+ switch (reset_type) {
+ case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
+ type = DRAIN_PIPE;
+ break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
+ type = RESET_WAVES;
+ break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
+ type = SAVE_WAVES;
+ break;
+ default:
+ type = DRAIN_PIPE;
+ break;
+ }
+
+ WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, type);
+
+ end_jiffies = (utimeout * HZ / 1000) + jiffies;
+ while (true) {
+ temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("cp queue pipe %d queue %d preemption failed\n",
+ pipe_id, queue_id);
+ release_queue(adev);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ release_queue(adev);
+ return 0;
+}
+
+static int hqd_sdma_destroy_v10_3(struct amdgpu_device *adev, void *mqd,
+ unsigned int utimeout)
+{
+ struct v10_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t temp;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+ temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
+
+ while (true) {
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
+ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
+
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr_hi =
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
+
+ return 0;
+}
+
+static int wave_control_execute_v10_3(struct amdgpu_device *adev,
+ uint32_t gfx_index_val,
+ uint32_t sq_cmd, uint32_t inst)
+{
+ uint32_t data = 0;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);
+
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ INSTANCE_BROADCAST_WRITES, 1);
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SA_BROADCAST_WRITES, 1);
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SE_BROADCAST_WRITES, 1);
+
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+static bool get_atc_vmid_pasid_mapping_info_v10_3(struct amdgpu_device *adev,
+ uint8_t vmid, uint16_t *p_pasid)
+{
+ uint32_t value;
+
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
+}
+
+static void set_vm_context_page_table_base_v10_3(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t page_table_base)
+{
+ /* SDMA is on gfxhub as well for Navi1* series */
+ adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
+}
+
+static void program_trap_handler_settings_v10_3(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
+ uint32_t inst)
+{
+ lock_srbm(adev, 0, 0, 0, vmid);
+
+ /*
+ * Program TBA registers
+ */
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
+ lower_32_bits(tba_addr >> 8));
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
+ upper_32_bits(tba_addr >> 8) |
+ (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));
+
+ /*
+ * Program TMA registers
+ */
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
+ lower_32_bits(tma_addr >> 8));
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
+ upper_32_bits(tma_addr >> 8));
+
+ unlock_srbm(adev);
+}
+
+const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
+ .program_sh_mem_settings = program_sh_mem_settings_v10_3,
+ .set_pasid_vmid_mapping = set_pasid_vmid_mapping_v10_3,
+ .init_interrupts = init_interrupts_v10_3,
+ .hqd_load = hqd_load_v10_3,
+ .hiq_mqd_load = hiq_mqd_load_v10_3,
+ .hqd_sdma_load = hqd_sdma_load_v10_3,
+ .hqd_dump = hqd_dump_v10_3,
+ .hqd_sdma_dump = hqd_sdma_dump_v10_3,
+ .hqd_is_occupied = hqd_is_occupied_v10_3,
+ .hqd_sdma_is_occupied = hqd_sdma_is_occupied_v10_3,
+ .hqd_destroy = hqd_destroy_v10_3,
+ .hqd_sdma_destroy = hqd_sdma_destroy_v10_3,
+ .wave_control_execute = wave_control_execute_v10_3,
+ .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info_v10_3,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
+ .program_trap_handler_settings = program_trap_handler_settings_v10_3,
+ .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v10_build_dequeue_wait_counts_packet_info,
+ .enable_debug_trap = kgd_gfx_v10_enable_debug_trap,
+ .disable_debug_trap = kgd_gfx_v10_disable_debug_trap,
+ .validate_trap_override_request = kgd_gfx_v10_validate_trap_override_request,
+ .set_wave_launch_trap_override = kgd_gfx_v10_set_wave_launch_trap_override,
+ .set_wave_launch_mode = kgd_gfx_v10_set_wave_launch_mode,
+ .set_address_watch = kgd_gfx_v10_set_address_watch,
+ .clear_address_watch = kgd_gfx_v10_clear_address_watch,
+ .hqd_get_pq_addr = kgd_gfx_v10_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v10_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v10_hqd_sdma_get_doorbell
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
new file mode 100644
index 000000000000..aaccf0b9947d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
@@ -0,0 +1,835 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/mmu_context.h>
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "gc/gc_11_0_0_offset.h"
+#include "gc/gc_11_0_0_sh_mask.h"
+#include "oss/osssys_6_0_0_offset.h"
+#include "oss/osssys_6_0_0_sh_mask.h"
+#include "soc15_common.h"
+#include "soc15d.h"
+#include "v11_structs.h"
+#include "soc21.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+enum hqd_dequeue_request_type {
+ NO_ACTION = 0,
+ DRAIN_PIPE,
+ RESET_WAVES,
+ SAVE_WAVES
+};
+
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
+ uint32_t queue, uint32_t vmid)
+{
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, mec, pipe, queue, vmid);
+}
+
+static void unlock_srbm(struct amdgpu_device *adev)
+{
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ lock_srbm(adev, mec, pipe, queue_id, 0);
+}
+
+static uint64_t get_queue_mask(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
+ queue_id;
+
+ return 1ull << bit;
+}
+
+static void release_queue(struct amdgpu_device *adev)
+{
+ unlock_srbm(adev);
+}
+
+static void program_sh_mem_settings_v11(struct amdgpu_device *adev, uint32_t vmid,
+ uint32_t sh_mem_config,
+ uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit,
+ uint32_t sh_mem_bases, uint32_t inst)
+{
+ lock_srbm(adev, 0, 0, 0, vmid);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, regSH_MEM_CONFIG), sh_mem_config);
+ WREG32(SOC15_REG_OFFSET(GC, 0, regSH_MEM_BASES), sh_mem_bases);
+
+ unlock_srbm(adev);
+}
+
+static int set_pasid_vmid_mapping_v11(struct amdgpu_device *adev, unsigned int pasid,
+ unsigned int vmid, uint32_t inst)
+{
+ uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT;
+
+ /* Mapping vmid to pasid also for IH block */
+ pr_debug("mapping vmid %d -> pasid %d in IH block for GFX client\n",
+ vmid, pasid);
+ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, regIH_VMID_0_LUT) + vmid, value);
+
+ return 0;
+}
+
+static int init_interrupts_v11(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t inst)
+{
+ uint32_t mec;
+ uint32_t pipe;
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ lock_srbm(adev, mec, pipe, 0, 0);
+
+ WREG32_SOC15(GC, 0, regCPC_INT_CNTL,
+ CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+ CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
+
+ unlock_srbm(adev);
+
+ return 0;
+}
+
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
+ unsigned int engine_id,
+ unsigned int queue_id)
+{
+ uint32_t sdma_engine_reg_base = 0;
+ uint32_t sdma_rlc_reg_offset;
+
+ switch (engine_id) {
+ case 0:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ regSDMA0_QUEUE0_RB_CNTL) - regSDMA0_QUEUE0_RB_CNTL;
+ break;
+ case 1:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+ regSDMA1_QUEUE0_RB_CNTL) - regSDMA0_QUEUE0_RB_CNTL;
+ break;
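+	/* Only two SDMA engines are expected here; anything else is a
+	 * driver bug.
+	 */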
+ default:
+ BUG();
+ }
+
+ sdma_rlc_reg_offset = sdma_engine_reg_base
+ + queue_id * (regSDMA0_QUEUE1_RB_CNTL - regSDMA0_QUEUE0_RB_CNTL);
+
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, sdma_rlc_reg_offset);
+
+ return sdma_rlc_reg_offset;
+}
+
+static inline struct v11_compute_mqd *get_mqd(void *mqd)
+{
+ return (struct v11_compute_mqd *)mqd;
+}
+
+static inline struct v11_sdma_mqd *get_sdma_mqd(void *mqd)
+{
+ return (struct v11_sdma_mqd *)mqd;
+}
+
+static int hqd_load_v11(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm, uint32_t inst)
+{
+ struct v11_compute_mqd *m;
+ uint32_t *mqd_hqd;
+ uint32_t reg, hqd_base, data;
+
+ m = get_mqd(mqd);
+
+ pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
+
+	/* HIQ is set during driver init period with vmid set to 0 */
+ if (m->cp_hqd_vmid == 0) {
+ uint32_t value, mec, pipe;
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+ value = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_CP_SCHEDULERS));
+ value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
+ ((mec << 5) | (pipe << 3) | queue_id | 0x80));
+ WREG32(SOC15_REG_OFFSET(GC, 0, regRLC_CP_SCHEDULERS), value);
+ }
+
+ /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
+ mqd_hqd = &m->cp_mqd_base_addr_lo;
+ hqd_base = SOC15_REG_OFFSET(GC, 0, regCP_MQD_BASE_ADDR);
+
+ for (reg = hqd_base;
+ reg <= SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_HI); reg++)
+ WREG32(reg, mqd_hqd[reg - hqd_base]);
+
+
+ /* Activate doorbell logic before triggering WPTR poll. */
+ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
+ CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+ WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL), data);
+
+ if (wptr) {
+ /* Don't read wptr with get_user because the user
+ * context may not be accessible (if this function
+ * runs in a work queue). Instead trigger a one-shot
+ * polling read from memory in the CP. This assumes
+ * that wptr is GPU-accessible in the queue's VMID via
+ * ATC or SVM. WPTR==RPTR before starting the poll so
+ * the CP starts fetching new commands from the right
+ * place.
+ *
+ * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
+ * tricky. Assume that the queue didn't overflow. The
+ * number of valid bits in the 32-bit RPTR depends on
+ * the queue size. The remaining bits are taken from
+ * the saved 64-bit WPTR. If the WPTR wrapped, add the
+ * queue size.
+ */
+ uint32_t queue_size =
+ 2 << REG_GET_FIELD(m->cp_hqd_pq_control,
+ CP_HQD_PQ_CONTROL, QUEUE_SIZE);
+ uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
+
+ if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
+ guessed_wptr += queue_size;
+ guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
+ guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_LO),
+ lower_32_bits(guessed_wptr));
+ WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_HI),
+ upper_32_bits(guessed_wptr));
+ WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR),
+ lower_32_bits((uint64_t)wptr));
+ WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+ upper_32_bits((uint64_t)wptr));
+ pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
+ (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
+ WREG32(SOC15_REG_OFFSET(GC, 0, regCP_PQ_WPTR_POLL_CNTL1),
+ (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
+ }
+
+ /* Start the EOP fetcher */
+ WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_EOP_RPTR),
+ REG_SET_FIELD(m->cp_hqd_eop_rptr,
+ CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
+
+ data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+ WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_ACTIVE), data);
+
+ release_queue(adev);
+
+ return 0;
+}
+
+static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off, uint32_t inst)
+{
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
+ struct v11_compute_mqd *m;
+ uint32_t mec, pipe;
+ int r;
+
+ m = get_mqd(mqd);
+
+ acquire_queue(adev, pipe_id, queue_id);
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+
+ spin_lock(&adev->gfx.kiq[0].ring_lock);
+ r = amdgpu_ring_alloc(kiq_ring, 7);
+ if (r) {
+ pr_err("Failed to alloc KIQ (%d).\n", r);
+ goto out_unlock;
+ }
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(queue_id) |
+ PACKET3_MAP_QUEUES_PIPE(pipe) |
+ PACKET3_MAP_QUEUES_ME((mec - 1)) |
+ PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
+ amdgpu_ring_commit(kiq_ring);
+
+out_unlock:
+ spin_unlock(&adev->gfx.kiq[0].ring_lock);
+ release_queue(adev);
+
+ return r;
+}
+
+static int hqd_dump_v11(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
+{
+ uint32_t i = 0, reg;
+#define HQD_N_REGS 56
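+/* Record one register as a (byte offset, value) pair; the dword register
+ * offset is shifted left by 2 to yield the byte address.
+ */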
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32(addr); \
+ } while (0)
+
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ acquire_queue(adev, pipe_id, queue_id);
+
+ for (reg = SOC15_REG_OFFSET(GC, 0, regCP_MQD_BASE_ADDR);
+ reg <= SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_HI); reg++)
+ DUMP_REG(reg);
+
+ release_queue(adev);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static int hqd_sdma_load_v11(struct amdgpu_device *adev, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
+{
+ struct v11_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ unsigned long end_jiffies;
+ uint32_t data;
+ uint64_t data64;
+ uint64_t __user *wptr64 = (uint64_t __user *)wptr;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_CNTL,
+ m->sdmax_rlcx_rb_cntl & (~SDMA0_QUEUE0_RB_CNTL__RB_ENABLE_MASK));
+
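+	/* Wait up to 2 seconds for the SDMA RLC context to report idle before
+	 * reprogramming the queue registers.
+	 */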
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_CONTEXT_STATUS);
+ if (data & SDMA0_QUEUE0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_DOORBELL_OFFSET,
+ m->sdmax_rlcx_doorbell_offset);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_QUEUE0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_RPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_MINOR_PTR_UPDATE, 1);
+ if (read_user_wptr(mm, wptr64, data64)) {
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_WPTR,
+ lower_32_bits(data64));
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_WPTR_HI,
+ upper_32_bits(data64));
+ } else {
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_WPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_WPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+ }
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_MINOR_PTR_UPDATE, 0);
+
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_BASE_HI,
+ m->sdmax_rlcx_rb_base_hi);
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_RPTR_ADDR_LO,
+ m->sdmax_rlcx_rb_rptr_addr_lo);
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_RPTR_ADDR_HI,
+ m->sdmax_rlcx_rb_rptr_addr_hi);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_QUEUE0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_CNTL, data);
+
+ return 0;
+}
+
+static int hqd_sdma_dump_v11(struct amdgpu_device *adev,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (7+11+1+12+12)
+
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = regSDMA0_QUEUE0_RB_CNTL;
+ reg <= regSDMA0_QUEUE0_RB_WPTR_HI; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = regSDMA0_QUEUE0_RB_RPTR_ADDR_HI;
+ reg <= regSDMA0_QUEUE0_DOORBELL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = regSDMA0_QUEUE0_DOORBELL_LOG;
+ reg <= regSDMA0_QUEUE0_DOORBELL_LOG; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = regSDMA0_QUEUE0_DOORBELL_OFFSET;
+ reg <= regSDMA0_QUEUE0_RB_PREEMPT; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = regSDMA0_QUEUE0_MIDCMD_DATA0;
+ reg <= regSDMA0_QUEUE0_MIDCMD_CNTL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static bool hqd_is_occupied_v11(struct amdgpu_device *adev, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id, uint32_t inst)
+{
+ uint32_t act;
+ bool retval = false;
+ uint32_t low, high;
+
+ acquire_queue(adev, pipe_id, queue_id);
+ act = RREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_ACTIVE));
+ if (act) {
+ low = lower_32_bits(queue_address >> 8);
+ high = upper_32_bits(queue_address >> 8);
+
+ if (low == RREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_BASE)) &&
+ high == RREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_BASE_HI)))
+ retval = true;
+ }
+ release_queue(adev);
+ return retval;
+}
+
+static bool hqd_sdma_is_occupied_v11(struct amdgpu_device *adev, void *mqd)
+{
+ struct v11_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t sdma_rlc_rb_cntl;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_CNTL);
+
+ if (sdma_rlc_rb_cntl & SDMA0_QUEUE0_RB_CNTL__RB_ENABLE_MASK)
+ return true;
+
+ return false;
+}
+
+static int hqd_destroy_v11(struct amdgpu_device *adev, void *mqd,
+ enum kfd_preempt_type reset_type,
+ unsigned int utimeout, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t inst)
+{
+ enum hqd_dequeue_request_type type;
+ unsigned long end_jiffies;
+ uint32_t temp;
+ struct v11_compute_mqd *m = get_mqd(mqd);
+
+ acquire_queue(adev, pipe_id, queue_id);
+
+ if (m->cp_hqd_vmid == 0)
+ WREG32_FIELD15_PREREG(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);
+
+ switch (reset_type) {
+ case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
+ type = DRAIN_PIPE;
+ break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
+ type = RESET_WAVES;
+ break;
+ default:
+ type = DRAIN_PIPE;
+ break;
+ }
+
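+	/* Issue the dequeue request, then poll CP_HQD_ACTIVE until the queue
+	 * deactivates or the caller-supplied timeout (in milliseconds) expires.
+	 */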
+ WREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_DEQUEUE_REQUEST), type);
+
+ end_jiffies = (utimeout * HZ / 1000) + jiffies;
+ while (true) {
+ temp = RREG32(SOC15_REG_OFFSET(GC, 0, regCP_HQD_ACTIVE));
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("cp queue pipe %d queue %d preemption failed\n",
+ pipe_id, queue_id);
+ release_queue(adev);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ release_queue(adev);
+ return 0;
+}
+
+static int hqd_sdma_destroy_v11(struct amdgpu_device *adev, void *mqd,
+ unsigned int utimeout)
+{
+ struct v11_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t temp;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ temp = RREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_CNTL);
+ temp = temp & ~SDMA0_QUEUE0_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_CNTL, temp);
+
+ while (true) {
+ temp = RREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_CONTEXT_STATUS);
+ if (temp & SDMA0_QUEUE0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_CNTL) |
+ SDMA0_QUEUE0_RB_CNTL__RB_ENABLE_MASK);
+
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr_hi =
+ RREG32(sdma_rlc_reg_offset + regSDMA0_QUEUE0_RB_RPTR_HI);
+
+ return 0;
+}
+
+static int wave_control_execute_v11(struct amdgpu_device *adev,
+ uint32_t gfx_index_val,
+ uint32_t sq_cmd, uint32_t inst)
+{
+ uint32_t data = 0;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX), gfx_index_val);
+ WREG32(SOC15_REG_OFFSET(GC, 0, regSQ_CMD), sq_cmd);
+
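+	/* Restore broadcast mode so that subsequent register writes reach all
+	 * SEs, SAs and instances.
+	 */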
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ INSTANCE_BROADCAST_WRITES, 1);
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SA_BROADCAST_WRITES, 1);
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SE_BROADCAST_WRITES, 1);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX), data);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+static void set_vm_context_page_table_base_v11(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t page_table_base)
+{
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+ pr_err("trying to set page table base for wrong VMID %u\n",
+ vmid);
+ return;
+ }
+
+ /* SDMA is on gfxhub as well for gfx11 adapters */
+ adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
+}
+
+/*
+ * Returns TRAP_EN, EXCP_EN and EXCP_REPLACE.
+ *
+ * restore_dbg_registers is ignored here but is a general interface requirement
+ * for devices that support GFXOFF and where the RLC save/restore list
+ * does not support hw registers for debugging, i.e. the driver has to manually
+ * initialize the debug mode registers after it has disabled GFX off during the
+ * debug session.
+ */
+static uint32_t kgd_gfx_v11_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
+
+ return data;
+}
+
+/* Returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_gfx_v11_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
+
+ return data;
+}
+
+static int kgd_gfx_v11_validate_trap_override_request(struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported)
+{
+ *trap_mask_supported &= KFD_DBG_TRAP_MASK_FP_INVALID |
+ KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
+ KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_FP_OVERFLOW |
+ KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
+ KFD_DBG_TRAP_MASK_FP_INEXACT |
+ KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
+ KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 4))
+ *trap_mask_supported |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START |
+ KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
+
+ if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR &&
+ trap_override != KFD_DBG_TRAP_OVERRIDE_REPLACE)
+ return -EPERM;
+
+ return 0;
+}
+
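+/* Translate the KFD debug trap mask into SPI_GDBG_PER_VMID_CNTL fields: the
+ * FP/INT/watch bits feed EXCP_EN while the wave start/end bits map to
+ * TRAP_ON_START and TRAP_ON_END.
+ */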
+static uint32_t trap_mask_map_sw_to_hw(uint32_t mask)
+{
+ uint32_t trap_on_start = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START) ? 1 : 0;
+ uint32_t trap_on_end = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END) ? 1 : 0;
+ uint32_t excp_en = mask & (KFD_DBG_TRAP_MASK_FP_INVALID |
+ KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
+ KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_FP_OVERFLOW |
+ KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
+ KFD_DBG_TRAP_MASK_FP_INEXACT |
+ KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
+ KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION);
+ uint32_t ret;
+
+ ret = REG_SET_FIELD(0, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, excp_en);
+ ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START, trap_on_start);
+ ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END, trap_on_end);
+
+ return ret;
+}
+
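+/* Inverse of trap_mask_map_sw_to_hw(): recover the KFD trap mask bits from a
+ * SPI_GDBG_PER_VMID_CNTL value.
+ */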
+static uint32_t trap_mask_map_hw_to_sw(uint32_t mask)
+{
+ uint32_t ret = REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, EXCP_EN);
+
+ if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START))
+ ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START;
+
+ if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END))
+ ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
+
+ return ret;
+}
+
+/* Returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_gfx_v11_set_wave_launch_trap_override(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_trap_cntl_prev)
+{
+ uint32_t data = 0;
+
+ *trap_mask_prev = trap_mask_map_hw_to_sw(kfd_dbg_trap_cntl_prev);
+
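+	/* Take the requested bits from trap_mask_bits and keep the previous
+	 * values for all other bits.
+	 */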
+ data = (trap_mask_bits & trap_mask_request) | (*trap_mask_prev & ~trap_mask_request);
+ data = trap_mask_map_sw_to_hw(data);
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, trap_override);
+
+ return data;
+}
+
+static uint32_t kgd_gfx_v11_set_wave_launch_mode(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, LAUNCH_MODE, wave_launch_mode);
+
+ return data;
+}
+
+#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H)
+static uint32_t kgd_gfx_v11_set_address_watch(struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst)
+{
+ uint32_t watch_address_high;
+ uint32_t watch_address_low;
+ uint32_t watch_address_cntl;
+
+ watch_address_cntl = 0;
+ watch_address_low = lower_32_bits(watch_address);
+ watch_address_high = upper_32_bits(watch_address) & 0xffff;
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MODE,
+ watch_mode);
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MASK,
+ watch_address_mask >> 7);
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VALID,
+ 1);
+
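+	/* Only the watch address registers are written here; the assembled
+	 * TCP_WATCHx_CNTL value is returned to the caller instead of being
+	 * written.
+	 */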
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_H) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_high);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_L) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_low);
+
+ return watch_address_cntl;
+}
+
+static uint32_t kgd_gfx_v11_clear_address_watch(struct amdgpu_device *adev,
+ uint32_t watch_id)
+{
+ return 0;
+}
+
+static uint64_t kgd_gfx_v11_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst)
+{
+ return 0;
+}
+
+static uint64_t kgd_gfx_v11_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst, unsigned int utimeout)
+{
+ return 0;
+}
+
+static uint32_t kgd_gfx_v11_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
+const struct kfd2kgd_calls gfx_v11_kfd2kgd = {
+ .program_sh_mem_settings = program_sh_mem_settings_v11,
+ .set_pasid_vmid_mapping = set_pasid_vmid_mapping_v11,
+ .init_interrupts = init_interrupts_v11,
+ .hqd_load = hqd_load_v11,
+ .hiq_mqd_load = hiq_mqd_load_v11,
+ .hqd_sdma_load = hqd_sdma_load_v11,
+ .hqd_dump = hqd_dump_v11,
+ .hqd_sdma_dump = hqd_sdma_dump_v11,
+ .hqd_is_occupied = hqd_is_occupied_v11,
+ .hqd_sdma_is_occupied = hqd_sdma_is_occupied_v11,
+ .hqd_destroy = hqd_destroy_v11,
+ .hqd_sdma_destroy = hqd_sdma_destroy_v11,
+ .wave_control_execute = wave_control_execute_v11,
+ .get_atc_vmid_pasid_mapping_info = NULL,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base_v11,
+ .enable_debug_trap = kgd_gfx_v11_enable_debug_trap,
+ .disable_debug_trap = kgd_gfx_v11_disable_debug_trap,
+ .validate_trap_override_request = kgd_gfx_v11_validate_trap_override_request,
+ .set_wave_launch_trap_override = kgd_gfx_v11_set_wave_launch_trap_override,
+ .set_wave_launch_mode = kgd_gfx_v11_set_wave_launch_mode,
+ .set_address_watch = kgd_gfx_v11_set_address_watch,
+ .clear_address_watch = kgd_gfx_v11_clear_address_watch,
+ .hqd_get_pq_addr = kgd_gfx_v11_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v11_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v11_hqd_sdma_get_doorbell
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
new file mode 100644
index 000000000000..e0ceab400b2d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v12.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "gc/gc_12_0_0_offset.h"
+#include "gc/gc_12_0_0_sh_mask.h"
+#include "soc24.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
+ uint32_t queue, uint32_t vmid)
+{
+ mutex_lock(&adev->srbm_mutex);
+ soc24_grbm_select(adev, mec, pipe, queue, vmid);
+}
+
+static void unlock_srbm(struct amdgpu_device *adev)
+{
+ soc24_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t queue_id)
+{
+ uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ lock_srbm(adev, mec, pipe, queue_id, 0);
+}
+
+static void release_queue(struct amdgpu_device *adev)
+{
+ unlock_srbm(adev);
+}
+
+static int init_interrupts_v12(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t inst)
+{
+ uint32_t mec;
+ uint32_t pipe;
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ lock_srbm(adev, mec, pipe, 0, 0);
+
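+	/* Enable time-stamp and opcode-error interrupts for this compute pipe. */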
+ WREG32_SOC15(GC, 0, regCPC_INT_CNTL,
+ CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+ CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
+
+ unlock_srbm(adev);
+
+ return 0;
+}
+
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
+ unsigned int engine_id,
+ unsigned int queue_id)
+{
+ uint32_t sdma_engine_reg_base = 0;
+ uint32_t sdma_rlc_reg_offset;
+
+ switch (engine_id) {
+ case 0:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ regSDMA0_QUEUE0_RB_CNTL) - regSDMA0_QUEUE0_RB_CNTL;
+ break;
+ case 1:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+ regSDMA1_QUEUE0_RB_CNTL) - regSDMA0_QUEUE0_RB_CNTL;
+ break;
+ default:
+ BUG();
+ }
+
+ sdma_rlc_reg_offset = sdma_engine_reg_base
+ + queue_id * (regSDMA0_QUEUE1_RB_CNTL - regSDMA0_QUEUE0_RB_CNTL);
+
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, sdma_rlc_reg_offset);
+
+ return sdma_rlc_reg_offset;
+}
+
+static int hqd_dump_v12(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
+{
+ uint32_t i = 0, reg;
+#define HQD_N_REGS 56
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32(addr); \
+ } while (0)
+
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ acquire_queue(adev, pipe_id, queue_id);
+
+ for (reg = SOC15_REG_OFFSET(GC, 0, regCP_MQD_BASE_ADDR);
+ reg <= SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_HI); reg++)
+ DUMP_REG(reg);
+
+ release_queue(adev);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static int hqd_sdma_dump_v12(struct amdgpu_device *adev,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
+ uint32_t i = 0, reg;
+
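+	/* Dump the contiguous SDMA queue register range from RB_CNTL through
+	 * CONTEXT_STATUS as (byte offset, value) pairs.
+	 */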
+ const uint32_t first_reg = regSDMA0_QUEUE0_RB_CNTL;
+ const uint32_t last_reg = regSDMA0_QUEUE0_CONTEXT_STATUS;
+#undef HQD_N_REGS
+#define HQD_N_REGS (last_reg - first_reg + 1)
+
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = first_reg;
+ reg <= last_reg; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static int wave_control_execute_v12(struct amdgpu_device *adev,
+ uint32_t gfx_index_val,
+ uint32_t sq_cmd, uint32_t inst)
+{
+ uint32_t data = 0;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX), gfx_index_val);
+ WREG32(SOC15_REG_OFFSET(GC, 0, regSQ_CMD), sq_cmd);
+
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ INSTANCE_BROADCAST_WRITES, 1);
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SA_BROADCAST_WRITES, 1);
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SE_BROADCAST_WRITES, 1);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, regGRBM_GFX_INDEX), data);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_gfx_v12_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
+
+ return data;
+}
+
+/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_gfx_v12_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, 0);
+
+ return data;
+}
+
+static int kgd_gfx_v12_validate_trap_override_request(struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported)
+{
+ *trap_mask_supported &= KFD_DBG_TRAP_MASK_FP_INVALID |
+ KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
+ KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_FP_OVERFLOW |
+ KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
+ KFD_DBG_TRAP_MASK_FP_INEXACT |
+ KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
+ KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION |
+ KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START |
+ KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
+
+ if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR &&
+ trap_override != KFD_DBG_TRAP_OVERRIDE_REPLACE)
+ return -EPERM;
+
+ return 0;
+}
+
+static uint32_t trap_mask_map_sw_to_hw(uint32_t mask)
+{
+ uint32_t trap_on_start = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START) ? 1 : 0;
+ uint32_t trap_on_end = (mask & KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END) ? 1 : 0;
+ uint32_t excp_en = mask & (KFD_DBG_TRAP_MASK_FP_INVALID |
+ KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL |
+ KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_FP_OVERFLOW |
+ KFD_DBG_TRAP_MASK_FP_UNDERFLOW |
+ KFD_DBG_TRAP_MASK_FP_INEXACT |
+ KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO |
+ KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH |
+ KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION);
+ uint32_t ret;
+
+ ret = REG_SET_FIELD(0, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, excp_en);
+ ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START, trap_on_start);
+ ret = REG_SET_FIELD(ret, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END, trap_on_end);
+
+ return ret;
+}
+
+static uint32_t trap_mask_map_hw_to_sw(uint32_t mask)
+{
+ uint32_t ret = REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, EXCP_EN);
+
+ if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_START))
+ ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START;
+
+ if (REG_GET_FIELD(mask, SPI_GDBG_PER_VMID_CNTL, TRAP_ON_END))
+ ret |= KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END;
+
+ return ret;
+}
+
+/* returns TRAP_EN, EXCP_EN and EXCP_REPLACE. */
+static uint32_t kgd_gfx_v12_set_wave_launch_trap_override(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_trap_cntl_prev)
+{
+ uint32_t data = 0;
+
+ *trap_mask_prev = trap_mask_map_hw_to_sw(kfd_dbg_trap_cntl_prev);
+
+ data = (trap_mask_bits & trap_mask_request) | (*trap_mask_prev & ~trap_mask_request);
+ data = trap_mask_map_sw_to_hw(data);
+
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE, trap_override);
+
+ return data;
+}
+
+/* returns STALL_VMID or LAUNCH_MODE. */
+static uint32_t kgd_gfx_v12_set_wave_launch_mode(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+ bool is_stall_mode = wave_launch_mode == 4;
+
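+	/* A launch mode of 4 stalls the VMID via STALL_VMID; other modes are
+	 * programmed into LAUNCH_MODE directly.
+	 */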
+ if (is_stall_mode)
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, STALL_VMID,
+ 1);
+ else
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, LAUNCH_MODE,
+ wave_launch_mode);
+
+ return data;
+}
+
+#define TCP_WATCH_STRIDE (regTCP_WATCH1_ADDR_H - regTCP_WATCH0_ADDR_H)
+static uint32_t kgd_gfx_v12_set_address_watch(struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst)
+{
+ uint32_t watch_address_high;
+ uint32_t watch_address_low;
+ uint32_t watch_address_cntl;
+
+ watch_address_cntl = 0;
+ watch_address_low = lower_32_bits(watch_address);
+ watch_address_high = upper_32_bits(watch_address) & 0xffff;
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MODE,
+ watch_mode);
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MASK,
+ watch_address_mask >> 7);
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VALID,
+ 1);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_H) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_high);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, regTCP_WATCH0_ADDR_L) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_low);
+
+ return watch_address_cntl;
+}
+
+static uint32_t kgd_gfx_v12_clear_address_watch(struct amdgpu_device *adev,
+ uint32_t watch_id)
+{
+ return 0;
+}
+
+static uint32_t kgd_gfx_v12_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
+const struct kfd2kgd_calls gfx_v12_kfd2kgd = {
+ .init_interrupts = init_interrupts_v12,
+ .hqd_dump = hqd_dump_v12,
+ .hqd_sdma_dump = hqd_sdma_dump_v12,
+ .wave_control_execute = wave_control_execute_v12,
+ .get_atc_vmid_pasid_mapping_info = NULL,
+ .enable_debug_trap = kgd_gfx_v12_enable_debug_trap,
+ .disable_debug_trap = kgd_gfx_v12_disable_debug_trap,
+ .validate_trap_override_request = kgd_gfx_v12_validate_trap_override_request,
+ .set_wave_launch_trap_override = kgd_gfx_v12_set_wave_launch_trap_override,
+ .set_wave_launch_mode = kgd_gfx_v12_set_wave_launch_mode,
+ .set_address_watch = kgd_gfx_v12_set_address_watch,
+ .clear_address_watch = kgd_gfx_v12_clear_address_watch,
+ .hqd_sdma_get_doorbell = kgd_gfx_v12_hqd_sdma_get_doorbell
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 1a0a5f7cccbc..df77558e03ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -20,15 +20,11 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
-#include <linux/firmware.h>
-#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
-#include "amdgpu_ucode.h"
+#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
@@ -38,187 +34,65 @@
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"
-#define CIK_PIPE_PER_MEC (4)
+enum hqd_dequeue_request_type {
+ NO_ACTION = 0,
+ DRAIN_PIPE,
+ RESET_WAVES
+};
enum {
MAX_TRAPID = 8, /* 3 bits in the bitfield. */
MAX_WATCH_ADDRESSES = 4
};
-enum {
- ADDRESS_WATCH_REG_ADDR_HI = 0,
- ADDRESS_WATCH_REG_ADDR_LO,
- ADDRESS_WATCH_REG_CNTL,
- ADDRESS_WATCH_REG_MAX
-};
-
-/* not defined in the CI/KV reg file */
-enum {
- ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
- ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
- ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
- /* extend the mask to 26 bits to match the low address field */
- ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
- ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
-};
-
-static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
- mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
- mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
- mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
- mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
-};
-
-union TCP_WATCH_CNTL_BITS {
- struct {
- uint32_t mask:24;
- uint32_t vmid:4;
- uint32_t atc:1;
- uint32_t mode:2;
- uint32_t valid:1;
- } bitfields, bits;
- uint32_t u32All;
- signed int i32All;
- float f32All;
-};
-
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
- uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
-
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-
-static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
- uint32_t hpd_size, uint64_t hpd_gpu_addr);
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
-
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
-
-static const struct kfd2kgd_calls kfd2kgd = {
- .init_gtt_mem_allocation = alloc_gtt_mem,
- .free_gtt_mem = free_gtt_mem,
- .get_vmem_size = get_vmem_size,
- .get_gpu_clock_counter = get_gpu_clock_counter,
- .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_pipeline = kgd_init_pipeline,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
- .write_vmid_invalidate_request = write_vmid_invalidate_request,
- .get_fw_version = get_fw_version
-};
-
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
mutex_lock(&adev->srbm_mutex);
WREG32(mmSRBM_GFX_CNTL, value);
}
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
WREG32(mmSRBM_GFX_CNTL, 0);
mutex_unlock(&adev->srbm_mutex);
}
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id)
{
- uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
- uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
+ uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, queue_id, 0);
+ lock_srbm(adev, mec, pipe, queue_id, 0);
}
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
{
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases)
+ uint32_t sh_mem_bases, uint32_t inst)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32(mmSH_MEM_CONFIG, sh_mem_config);
WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
WREG32(mmSH_MEM_BASES, sh_mem_bases);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid)
+static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
+ unsigned int vmid, uint32_t inst)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
/*
* We have to assume that there is no outstanding mapping.
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
@@ -240,51 +114,34 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
return 0;
}
-static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
- uint32_t hpd_size, uint64_t hpd_gpu_addr)
-{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
- uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
-
- lock_srbm(kgd, mec, pipe, 0, 0);
- WREG32(mmCP_HPD_EOP_BASE_ADDR, lower_32_bits(hpd_gpu_addr >> 8));
- WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(hpd_gpu_addr >> 8));
- WREG32(mmCP_HPD_EOP_VMID, 0);
- WREG32(mmCP_HPD_EOP_CONTROL, hpd_size);
- unlock_srbm(kgd);
-
- return 0;
-}
-
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t inst)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec;
uint32_t pipe;
- mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
- pipe = (pipe_id % CIK_PIPE_PER_MEC);
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, 0, 0);
+ lock_srbm(adev, mec, pipe, 0, 0);
WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
return 0;
}
-static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m)
{
uint32_t retval;
retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
- pr_debug("kfd: sdma base address: 0x%x\n", retval);
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
+ m->sdma_engine_id, m->sdma_queue_id, retval);
return retval;
}
@@ -299,112 +156,176 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
return (struct cik_sdma_rlc_registers *)mqd;
}
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr)
+static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
- uint32_t wptr_shadow, is_wptr_shadow_valid;
struct cik_mqd *m;
+ uint32_t *mqd_hqd;
+ uint32_t reg, wptr_val, data;
+ bool valid_wptr = false;
m = get_mqd(mqd);
- is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
+ acquire_queue(adev, pipe_id, queue_id);
- acquire_queue(kgd, pipe_id, queue_id);
- WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
- WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
- WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
+ /* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
+ mqd_hqd = &m->cp_mqd_base_addr_lo;
- WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
- WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
- WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
+ WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
- WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
- WREG32(mmCP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
- WREG32(mmCP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);
-
- WREG32(mmCP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);
-
- WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
- WREG32(mmCP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
- WREG32(mmCP_HQD_MSG_TYPE, m->cp_hqd_msg_type);
+ /* Copy userspace write pointer value to register.
+ * Activate doorbell logic to monitor subsequent changes.
+ */
+ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
+ CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
- WREG32(mmCP_HQD_ATOMIC0_PREOP_LO, m->cp_hqd_atomic0_preop_lo);
- WREG32(mmCP_HQD_ATOMIC0_PREOP_HI, m->cp_hqd_atomic0_preop_hi);
- WREG32(mmCP_HQD_ATOMIC1_PREOP_LO, m->cp_hqd_atomic1_preop_lo);
- WREG32(mmCP_HQD_ATOMIC1_PREOP_HI, m->cp_hqd_atomic1_preop_hi);
+ /* read_user_ptr may take the mm->mmap_lock.
+ * release srbm_mutex to avoid circular dependency between
+ * srbm_mutex->mmap_lock->reservation_ww_class_mutex->srbm_mutex.
+ */
+ release_queue(adev);
+ valid_wptr = read_user_wptr(mm, wptr, wptr_val);
+ acquire_queue(adev, pipe_id, queue_id);
+ if (valid_wptr)
+ WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
- WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
- WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
- m->cp_hqd_pq_rptr_report_addr_hi);
+ data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+ WREG32(mmCP_HQD_ACTIVE, data);
- WREG32(mmCP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);
+ release_queue(adev);
- WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, m->cp_hqd_pq_wptr_poll_addr_lo);
- WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, m->cp_hqd_pq_wptr_poll_addr_hi);
+ return 0;
+}
- WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+static int kgd_hqd_dump(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
+{
+ uint32_t i = 0, reg;
+#define HQD_N_REGS (35+4)
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32(addr); \
+ } while (0)
- WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
- WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
+ acquire_queue(adev, pipe_id, queue_id);
- WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
- WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);
- WREG32(mmCP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
+ DUMP_REG(reg);
- if (is_wptr_shadow_valid)
- WREG32(mmCP_HQD_PQ_WPTR, wptr_shadow);
+ release_queue(adev);
- WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
- release_queue(kgd);
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
+ unsigned long end_jiffies;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t data;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
- m->sdma_rlc_virtual_addr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
- m->sdma_rlc_rb_base);
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
+ data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdma_rlc_rb_rptr);
+
+ if (read_user_wptr(mm, wptr, data))
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
+ else
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+ m->sdma_rlc_rb_rptr);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ m->sdma_rlc_virtual_addr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
m->sdma_rlc_rb_base_hi);
-
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdma_rlc_rb_rptr_addr_lo);
-
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
- m->sdma_rlc_doorbell);
+ data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
+ queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+4)
+
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
- m->sdma_rlc_rb_cntl);
+ for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
return 0;
}
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id)
+static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t inst)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t act;
bool retval = false;
uint32_t low, high;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
act = RREG32(mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
@@ -414,21 +335,20 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
high == RREG32(mmCP_HQD_PQ_BASE_HI))
retval = true;
}
- release_queue(kgd);
+ release_queue(adev);
return retval;
}
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -436,126 +356,147 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
+ enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id)
+ uint32_t queue_id, uint32_t inst)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
- int timeout = utimeout;
+ enum hqd_dequeue_request_type type;
+ unsigned long flags, end_jiffies;
+ int retry;
- acquire_queue(kgd, pipe_id, queue_id);
+ if (amdgpu_in_reset(adev))
+ return -EIO;
+
+ acquire_queue(adev, pipe_id, queue_id);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
- WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+ switch (reset_type) {
+ case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
+ type = DRAIN_PIPE;
+ break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
+ type = RESET_WAVES;
+ break;
+ default:
+ type = DRAIN_PIPE;
+ break;
+ }
+
+	/* Workaround: if the IQ timer is active and the wait time is close to
+	 * or equal to 0, dequeuing is not safe. Wait until either the wait
+	 * time is larger or the timer is cleared. Also ensure that IQ_REQ_PEND
+	 * is cleared before continuing and that the wait time is at least 0x3.
+	 */
+ local_irq_save(flags);
+ preempt_disable();
+ retry = 5000; /* wait for 500 usecs at maximum */
+ while (true) {
+ temp = RREG32(mmCP_HQD_IQ_TIMER);
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
+ pr_debug("HW is processing IQ\n");
+ goto loop;
+ }
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
+ == 3) /* SEM-rearm is safe */
+ break;
+ /* Wait time 3 is safe for CP, but our MMIO read/write
+ * time is close to 1 microsecond, so check for 10 to
+ * leave more buffer room
+ */
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
+ >= 10)
+ break;
+ pr_debug("IQ timer is active\n");
+ } else
+ break;
+loop:
+ if (!retry) {
+ pr_err("CP HQD IQ timer status time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ retry = 1000;
+ while (true) {
+ temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+ if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
+ break;
+ pr_debug("Dequeue request is pending\n");
+ if (!retry) {
+ pr_err("CP HQD dequeue request time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ local_irq_restore(flags);
+ preempt_enable();
+
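+	/* Now that the IQ is quiet, issue the dequeue request; the loop below
+	 * waits for CP_HQD_ACTIVE to clear or for utimeout (in ms) to expire.
+	 */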
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);
+
+ end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
temp = RREG32(mmCP_HQD_ACTIVE);
- if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
- if (timeout <= 0) {
- pr_err("kfd: cp queue preemption time out.\n");
- release_queue(kgd);
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("cp queue preemption time out\n");
+ release_queue(adev);
return -ETIME;
}
- msleep(20);
- timeout -= 20;
+ usleep_range(500, 1000);
}
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
- int timeout = utimeout;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
- if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (timeout <= 0)
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
- msleep(20);
- timeout -= 20;
+ }
+ usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
+ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
- return 0;
-}
-
-static int kgd_address_watch_disable(struct kgd_dev *kgd)
-{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
- union TCP_WATCH_CNTL_BITS cntl;
- unsigned int i;
-
- cntl.u32All = 0;
-
- cntl.bitfields.valid = 0;
- cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
- cntl.bitfields.atc = 1;
-
- /* Turning off this address until we set all the registers */
- for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
- WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_CNTL], cntl.u32All);
-
- return 0;
-}
-
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo)
-{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
- union TCP_WATCH_CNTL_BITS cntl;
-
- cntl.u32All = cntl_val;
-
- /* Turning off this watch point until we set all the registers */
- cntl.bitfields.valid = 0;
- WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_CNTL], cntl.u32All);
-
- WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_ADDR_HI], addr_hi);
-
- WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_ADDR_LO], addr_lo);
-
- /* Enable the watch point */
- cntl.bitfields.valid = 1;
-
- WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
- ADDRESS_WATCH_REG_CNTL], cntl.u32All);
+ m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
return 0;
}
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
+static int kgd_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
- uint32_t sq_cmd)
+ uint32_t sq_cmd, uint32_t inst)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data;
mutex_lock(&adev->grbm_idx_mutex);
@@ -576,96 +517,73 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
return 0;
}
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset)
+static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
+ uint8_t vmid, uint16_t *p_pasid)
{
- return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
-}
+ uint32_t value;
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
+static void set_scratch_backing_va(struct amdgpu_device *adev,
+ uint64_t va, uint32_t vmid)
{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+ lock_srbm(adev, 0, 0, 0, vmid);
+ WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
+ unlock_srbm(adev);
}
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+static void set_vm_context_page_table_base(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t page_table_base)
{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+ pr_err("trying to set page table base for wrong VMID\n");
+ return;
+ }
+ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
+ lower_32_bits(page_table_base));
}
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+/**
+ * read_vmid_from_vmfault_reg - read the VMID from the VM fault status register
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns the VMID recorded in VM_CONTEXT1_PROTECTION_FAULT_STATUS (CIK).
+ */
+static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- const union amdgpu_firmware_header *hdr;
-
- BUG_ON(kgd == NULL);
-
- switch (type) {
- case KGD_ENGINE_PFP:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.pfp_fw->data;
- break;
-
- case KGD_ENGINE_ME:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.me_fw->data;
- break;
-
- case KGD_ENGINE_CE:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.ce_fw->data;
- break;
-
- case KGD_ENGINE_MEC1:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec_fw->data;
- break;
-
- case KGD_ENGINE_MEC2:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec2_fw->data;
- break;
-
- case KGD_ENGINE_RLC:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.rlc_fw->data;
- break;
-
- case KGD_ENGINE_SDMA1:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[0].fw->data;
- break;
+ uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
- case KGD_ENGINE_SDMA2:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[1].fw->data;
- break;
-
- default:
- return 0;
- }
+ return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
+}
- if (hdr == NULL)
- return 0;
+static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
- /* Only 12 bit in use*/
- return hdr->common.ucode_version;
+{
+ return 0;
}
+const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .wave_control_execute = kgd_wave_control_execute,
+ .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+ .hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
+};
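
For orientation (not part of this patch): each GFX generation now exports one of these constant kfd2kgd_calls tables, and KFD picks the table that matches the device at probe time. The sketch below only illustrates that selection pattern; the helper name and the specific chip cases are assumptions for illustration, not code from this series.

/* Illustrative sketch only: choose a per-generation KFD->KGD callback table.
 * example_pick_kfd2kgd() and the chip cases below are hypothetical; the real
 * selection happens in the KFD probe path.
 */
static const struct kfd2kgd_calls *example_pick_kfd2kgd(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_HAWAII:
		return &gfx_v7_kfd2kgd;		/* CIK table defined above */
	case CHIP_TONGA:
	case CHIP_FIJI:
		return &gfx_v8_kfd2kgd;		/* VI table defined below */
	default:
		return NULL;			/* not handled in this sketch */
	}
}
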
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 6697612239c2..e68c0fa8d751 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -20,14 +20,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/module.h>
-#include <linux/fdtable.h>
-#include <linux/uaccess.h>
-#include <linux/firmware.h>
-#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_ucode.h"
+#include "gfx_v8_0.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
@@ -38,148 +33,60 @@
#include "vi_structs.h"
#include "vid.h"
-#define VI_PIPE_PER_MEC (4)
-
-struct cik_sdma_rlc_registers;
-
-/*
- * Register access functions
- */
-
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
- uint32_t sh_mem_config,
- uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases);
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid);
-static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
- uint32_t hpd_size, uint64_t hpd_gpu_addr);
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr);
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id);
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
- unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id);
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
- unsigned int utimeout);
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
-static int kgd_address_watch_disable(struct kgd_dev *kgd);
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo);
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
- uint32_t gfx_index_val,
- uint32_t sq_cmd);
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset);
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid);
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
-
-static const struct kfd2kgd_calls kfd2kgd = {
- .init_gtt_mem_allocation = alloc_gtt_mem,
- .free_gtt_mem = free_gtt_mem,
- .get_vmem_size = get_vmem_size,
- .get_gpu_clock_counter = get_gpu_clock_counter,
- .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
- .program_sh_mem_settings = kgd_program_sh_mem_settings,
- .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
- .init_pipeline = kgd_init_pipeline,
- .init_interrupts = kgd_init_interrupts,
- .hqd_load = kgd_hqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
- .hqd_is_occupied = kgd_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
- .hqd_destroy = kgd_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
- .address_watch_disable = kgd_address_watch_disable,
- .address_watch_execute = kgd_address_watch_execute,
- .wave_control_execute = kgd_wave_control_execute,
- .address_watch_get_offset = kgd_address_watch_get_offset,
- .get_atc_vmid_pasid_mapping_pasid =
- get_atc_vmid_pasid_mapping_pasid,
- .get_atc_vmid_pasid_mapping_valid =
- get_atc_vmid_pasid_mapping_valid,
- .write_vmid_invalidate_request = write_vmid_invalidate_request,
- .get_fw_version = get_fw_version
+enum hqd_dequeue_request_type {
+ NO_ACTION = 0,
+ DRAIN_PIPE,
+ RESET_WAVES
};
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
-{
- return (struct kfd2kgd_calls *)&kfd2kgd;
-}
-
-static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
-{
- return (struct amdgpu_device *)kgd;
-}
-
-static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
+static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
uint32_t queue, uint32_t vmid)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
mutex_lock(&adev->srbm_mutex);
WREG32(mmSRBM_GFX_CNTL, value);
}
-static void unlock_srbm(struct kgd_dev *kgd)
+static void unlock_srbm(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
WREG32(mmSRBM_GFX_CNTL, 0);
mutex_unlock(&adev->srbm_mutex);
}
-static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
+static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
uint32_t queue_id)
{
- uint32_t mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
- uint32_t pipe = (pipe_id % VI_PIPE_PER_MEC);
+ uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, queue_id, 0);
+ lock_srbm(adev, mec, pipe, queue_id, 0);
}
-static void release_queue(struct kgd_dev *kgd)
+static void release_queue(struct amdgpu_device *adev)
{
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
+static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
uint32_t sh_mem_config,
uint32_t sh_mem_ape1_base,
uint32_t sh_mem_ape1_limit,
- uint32_t sh_mem_bases)
+ uint32_t sh_mem_bases, uint32_t inst)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
- lock_srbm(kgd, 0, 0, 0, vmid);
+ lock_srbm(adev, 0, 0, 0, vmid);
WREG32(mmSH_MEM_CONFIG, sh_mem_config);
WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
WREG32(mmSH_MEM_BASES, sh_mem_bases);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
}
-static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
- unsigned int vmid)
+static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
+ unsigned int vmid, uint32_t inst)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
-
/*
* We have to assume that there is no outstanding mapping.
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
@@ -202,33 +109,36 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
return 0;
}
-static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
- uint32_t hpd_size, uint64_t hpd_gpu_addr)
-{
- return 0;
-}
-
-static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
+static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t inst)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t mec;
uint32_t pipe;
- mec = (++pipe_id / VI_PIPE_PER_MEC) + 1;
- pipe = (pipe_id % VI_PIPE_PER_MEC);
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- lock_srbm(kgd, mec, pipe, 0, 0);
+ lock_srbm(adev, mec, pipe, 0, 0);
- WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);
+ WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+ CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
- unlock_srbm(kgd);
+ unlock_srbm(adev);
return 0;
}
-static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
+static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m)
{
- return 0;
+ uint32_t retval;
+
+ retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
+ m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
+
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
+ m->sdma_engine_id, m->sdma_queue_id, retval);
+
+ return retval;
}
static inline struct vi_mqd *get_mqd(void *mqd)
@@ -236,87 +146,218 @@ static inline struct vi_mqd *get_mqd(void *mqd)
return (struct vi_mqd *)mqd;
}
-static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
+static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
{
- return (struct cik_sdma_rlc_registers *)mqd;
+ return (struct vi_sdma_mqd *)mqd;
}
-static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
- uint32_t queue_id, uint32_t __user *wptr)
+static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
{
struct vi_mqd *m;
- uint32_t shadow_wptr, valid_wptr;
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
+ uint32_t *mqd_hqd;
+ uint32_t reg, wptr_val, data;
+ bool valid_wptr = false;
m = get_mqd(mqd);
- valid_wptr = copy_from_user(&shadow_wptr, wptr, sizeof(shadow_wptr));
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
- WREG32(mmCP_MQD_CONTROL, m->cp_mqd_control);
- WREG32(mmCP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
- WREG32(mmCP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
+	/* HIQ is set during driver init period with vmid set to 0 */
+ if (m->cp_hqd_vmid == 0) {
+ uint32_t value, mec, pipe;
- WREG32(mmCP_HQD_VMID, m->cp_hqd_vmid);
- WREG32(mmCP_HQD_PERSISTENT_STATE, m->cp_hqd_persistent_state);
- WREG32(mmCP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
- WREG32(mmCP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
- WREG32(mmCP_HQD_QUANTUM, m->cp_hqd_quantum);
- WREG32(mmCP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
- WREG32(mmCP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
- WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, m->cp_hqd_pq_rptr_report_addr_lo);
- WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI,
- m->cp_hqd_pq_rptr_report_addr_hi);
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
- if (valid_wptr > 0)
- WREG32(mmCP_HQD_PQ_WPTR, shadow_wptr);
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+ value = RREG32(mmRLC_CP_SCHEDULERS);
+ value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
+ ((mec << 5) | (pipe << 3) | queue_id | 0x80));
+ WREG32(mmRLC_CP_SCHEDULERS, value);
+ }
- WREG32(mmCP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
- WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, m->cp_hqd_pq_doorbell_control);
+ /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
+ mqd_hqd = &m->cp_mqd_base_addr_lo;
- WREG32(mmCP_HQD_EOP_BASE_ADDR, m->cp_hqd_eop_base_addr_lo);
- WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, m->cp_hqd_eop_base_addr_hi);
- WREG32(mmCP_HQD_EOP_CONTROL, m->cp_hqd_eop_control);
- WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
- WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
- WREG32(mmCP_HQD_EOP_EVENTS, m->cp_hqd_eop_done_events);
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
+ WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
- WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO, m->cp_hqd_ctx_save_base_addr_lo);
- WREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI, m->cp_hqd_ctx_save_base_addr_hi);
- WREG32(mmCP_HQD_CTX_SAVE_CONTROL, m->cp_hqd_ctx_save_control);
- WREG32(mmCP_HQD_CNTL_STACK_OFFSET, m->cp_hqd_cntl_stack_offset);
- WREG32(mmCP_HQD_CNTL_STACK_SIZE, m->cp_hqd_cntl_stack_size);
- WREG32(mmCP_HQD_WG_STATE_OFFSET, m->cp_hqd_wg_state_offset);
- WREG32(mmCP_HQD_CTX_SAVE_SIZE, m->cp_hqd_ctx_save_size);
+ /* Tonga errata: EOP RPTR/WPTR should be left unmodified.
+ * This is safe since EOP RPTR==WPTR for any inactive HQD
+ * on ASICs that do not support context-save.
+ * EOP writes/reads can start anywhere in the ring.
+ */
+ if (adev->asic_type != CHIP_TONGA) {
+ WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
+ WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
+ WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
+ }
- WREG32(mmCP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
+ for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
+ WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);
- WREG32(mmCP_HQD_DEQUEUE_REQUEST, m->cp_hqd_dequeue_request);
- WREG32(mmCP_HQD_ERROR, m->cp_hqd_error);
- WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
- WREG32(mmCP_HQD_EOP_DONES, m->cp_hqd_eop_dones);
+ /* Copy userspace write pointer value to register.
+ * Activate doorbell logic to monitor subsequent changes.
+ */
+ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
+ CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+ WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
+
+	/* read_user_wptr() may take the mm->mmap_lock.
+ * release srbm_mutex to avoid circular dependency between
+ * srbm_mutex->mmap_lock->reservation_ww_class_mutex->srbm_mutex.
+ */
+ release_queue(adev);
+ valid_wptr = read_user_wptr(mm, wptr, wptr_val);
+ acquire_queue(adev, pipe_id, queue_id);
+ if (valid_wptr)
+ WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);
- WREG32(mmCP_HQD_ACTIVE, m->cp_hqd_active);
+ data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+ WREG32(mmCP_HQD_ACTIVE, data);
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
+static int kgd_hqd_dump(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
{
+ uint32_t i = 0, reg;
+#define HQD_N_REGS (54+4)
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32(addr); \
+ } while (0)
+
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ acquire_queue(adev, pipe_id, queue_id);
+
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
+ DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);
+
+ for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
+ DUMP_REG(reg);
+
+ release_queue(adev);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
return 0;
}
-static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
- uint32_t pipe_id, uint32_t queue_id)
+static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
+{
+ struct vi_sdma_mqd *m;
+ unsigned long end_jiffies;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t data;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
+
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+
+ if (read_user_wptr(mm, wptr, data))
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
+ else
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+ m->sdmax_rlcx_rb_rptr);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ m->sdmax_rlcx_virtual_addr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
+ m->sdmax_rlcx_rb_base_hi);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ m->sdmax_rlcx_rb_rptr_addr_lo);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ m->sdmax_rlcx_rb_rptr_addr_hi);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
+ queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+4+2+3+7)
+
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+ for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
+ reg++)
+ DUMP_REG(sdma_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t inst)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t act;
bool retval = false;
uint32_t low, high;
- acquire_queue(kgd, pipe_id, queue_id);
+ acquire_queue(adev, pipe_id, queue_id);
act = RREG32(mmCP_HQD_ACTIVE);
if (act) {
low = lower_32_bits(queue_address >> 8);
@@ -326,21 +367,20 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
high == RREG32(mmCP_HQD_PQ_BASE_HI))
retval = true;
}
- release_queue(kgd);
+ release_queue(adev);
return retval;
}
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
+ struct vi_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
uint32_t sdma_rlc_rb_cntl;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
return true;
@@ -348,115 +388,161 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
+static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
+ enum kfd_preempt_type reset_type,
unsigned int utimeout, uint32_t pipe_id,
- uint32_t queue_id)
+ uint32_t queue_id, uint32_t inst)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
- int timeout = utimeout;
+ enum hqd_dequeue_request_type type;
+ unsigned long flags, end_jiffies;
+ int retry;
+ struct vi_mqd *m = get_mqd(mqd);
+
+ if (amdgpu_in_reset(adev))
+ return -EIO;
+
+ acquire_queue(adev, pipe_id, queue_id);
+
+ if (m->cp_hqd_vmid == 0)
+ WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);
- acquire_queue(kgd, pipe_id, queue_id);
+ switch (reset_type) {
+ case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
+ type = DRAIN_PIPE;
+ break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
+ type = RESET_WAVES;
+ break;
+ default:
+ type = DRAIN_PIPE;
+ break;
+ }
+
+ /* Workaround: If IQ timer is active and the wait time is close to or
+ * equal to 0, dequeueing is not safe. Wait until either the wait time
+	 * is larger or the timer is cleared. Also ensure that IQ_REQ_PEND is
+	 * cleared before continuing, and that the wait times are set to at
+	 * least 0x3.
+ */
+ local_irq_save(flags);
+ preempt_disable();
+ retry = 5000; /* wait for 500 usecs at maximum */
+ while (true) {
+ temp = RREG32(mmCP_HQD_IQ_TIMER);
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
+ pr_debug("HW is processing IQ\n");
+ goto loop;
+ }
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
+ == 3) /* SEM-rearm is safe */
+ break;
+ /* Wait time 3 is safe for CP, but our MMIO read/write
+ * time is close to 1 microsecond, so check for 10 to
+ * leave more buffer room
+ */
+ if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
+ >= 10)
+ break;
+ pr_debug("IQ timer is active\n");
+ } else
+ break;
+loop:
+ if (!retry) {
+ pr_err("CP HQD IQ timer status time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ retry = 1000;
+ while (true) {
+ temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
+ if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
+ break;
+ pr_debug("Dequeue request is pending\n");
+
+ if (!retry) {
+ pr_err("CP HQD dequeue request time out\n");
+ break;
+ }
+ ndelay(100);
+ --retry;
+ }
+ local_irq_restore(flags);
+ preempt_enable();
- WREG32(mmCP_HQD_DEQUEUE_REQUEST, reset_type);
+ WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);
+ end_jiffies = (utimeout * HZ / 1000) + jiffies;
while (true) {
temp = RREG32(mmCP_HQD_ACTIVE);
- if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
- if (timeout <= 0) {
- pr_err("kfd: cp queue preemption time out.\n");
- release_queue(kgd);
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("cp queue preemption time out.\n");
+ release_queue(adev);
return -ETIME;
}
- msleep(20);
- timeout -= 20;
+ usleep_range(500, 1000);
}
- release_queue(kgd);
+ release_queue(adev);
return 0;
}
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
unsigned int utimeout)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
- struct cik_sdma_rlc_registers *m;
- uint32_t sdma_base_addr;
+ struct vi_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
uint32_t temp;
- int timeout = utimeout;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
m = get_sdma_mqd(mqd);
- sdma_base_addr = get_sdma_base_addr(m);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
while (true) {
- temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
- if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
- if (timeout <= 0)
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
return -ETIME;
- msleep(20);
- timeout -= 20;
+ }
+ usleep_range(500, 1000);
}
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
-
- return 0;
-}
-
-static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
-
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
-}
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
+ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
-static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
- uint8_t vmid)
-{
- uint32_t reg;
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
- reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
- return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
+ return 0;
}
-static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
+static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
+ uint8_t vmid, uint16_t *p_pasid)
{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
+ uint32_t value;
- WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
-}
+ value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
-static int kgd_address_watch_disable(struct kgd_dev *kgd)
-{
- return 0;
-}
-
-static int kgd_address_watch_execute(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- uint32_t cntl_val,
- uint32_t addr_hi,
- uint32_t addr_lo)
-{
- return 0;
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
-static int kgd_wave_control_execute(struct kgd_dev *kgd,
+static int kgd_wave_control_execute(struct amdgpu_device *adev,
uint32_t gfx_index_val,
- uint32_t sq_cmd)
+ uint32_t sq_cmd, uint32_t inst)
{
- struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t data = 0;
mutex_lock(&adev->grbm_idx_mutex);
@@ -477,68 +563,48 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
return 0;
}
-static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
- unsigned int watch_point_id,
- unsigned int reg_offset)
+static void set_scratch_backing_va(struct amdgpu_device *adev,
+ uint64_t va, uint32_t vmid)
{
- return 0;
+ lock_srbm(adev, 0, 0, 0, vmid);
+ WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
+ unlock_srbm(adev);
}
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+static void set_vm_context_page_table_base(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t page_table_base)
{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- const union amdgpu_firmware_header *hdr;
-
- BUG_ON(kgd == NULL);
-
- switch (type) {
- case KGD_ENGINE_PFP:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.pfp_fw->data;
- break;
-
- case KGD_ENGINE_ME:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.me_fw->data;
- break;
-
- case KGD_ENGINE_CE:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.ce_fw->data;
- break;
-
- case KGD_ENGINE_MEC1:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec_fw->data;
- break;
-
- case KGD_ENGINE_MEC2:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec2_fw->data;
- break;
-
- case KGD_ENGINE_RLC:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.rlc_fw->data;
- break;
-
- case KGD_ENGINE_SDMA1:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[0].fw->data;
- break;
-
- case KGD_ENGINE_SDMA2:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[1].fw->data;
- break;
-
- default:
- return 0;
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+ pr_err("trying to set page table base for wrong VMID\n");
+ return;
}
+ WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
+ lower_32_bits(page_table_base));
+}
- if (hdr == NULL)
- return 0;
+static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
- /* Only 12 bit in use*/
- return hdr->common.ucode_version;
+{
+ return 0;
}
+
+const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
+ .program_sh_mem_settings = kgd_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_init_interrupts,
+ .hqd_load = kgd_hqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .wave_control_execute = kgd_wave_control_execute,
+ .get_atc_vmid_pasid_mapping_info =
+ get_atc_vmid_pasid_mapping_info,
+ .set_scratch_backing_va = set_scratch_backing_va,
+ .set_vm_context_page_table_base = set_vm_context_page_table_base,
+ .hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
new file mode 100644
index 000000000000..088d09cc7a72
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -0,0 +1,1264 @@
+/*
+ * Copyright 2014-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "gc/gc_9_0_offset.h"
+#include "gc/gc_9_0_sh_mask.h"
+#include "vega10_enum.h"
+#include "sdma0/sdma0_4_0_offset.h"
+#include "sdma0/sdma0_4_0_sh_mask.h"
+#include "sdma1/sdma1_4_0_offset.h"
+#include "sdma1/sdma1_4_0_sh_mask.h"
+#include "athub/athub_1_0_offset.h"
+#include "athub/athub_1_0_sh_mask.h"
+#include "oss/osssys_4_0_offset.h"
+#include "oss/osssys_4_0_sh_mask.h"
+#include "soc15_common.h"
+#include "v9_structs.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "gfx_v9_0.h"
+#include "amdgpu_amdkfd_gfx_v9.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+enum hqd_dequeue_request_type {
+ NO_ACTION = 0,
+ DRAIN_PIPE,
+ RESET_WAVES,
+ SAVE_WAVES
+};
+
+static void kgd_gfx_v9_lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
+ uint32_t queue, uint32_t vmid, uint32_t inst)
+{
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, mec, pipe, queue, vmid, GET_INST(GC, inst));
+}
+
+static void kgd_gfx_v9_unlock_srbm(struct amdgpu_device *adev, uint32_t inst)
+{
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, inst));
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+void kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t inst)
+{
+ uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ kgd_gfx_v9_lock_srbm(adev, mec, pipe, queue_id, 0, inst);
+}
+
+uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id)
+{
+ unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
+ queue_id;
+
+ return 1ull << bit;
+}
+
+void kgd_gfx_v9_release_queue(struct amdgpu_device *adev, uint32_t inst)
+{
+ kgd_gfx_v9_unlock_srbm(adev, inst);
+}
+
+void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
+ uint32_t sh_mem_config,
+ uint32_t sh_mem_ape1_base,
+ uint32_t sh_mem_ape1_limit,
+ uint32_t sh_mem_bases, uint32_t inst)
+{
+ kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst);
+
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmSH_MEM_CONFIG, sh_mem_config);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmSH_MEM_BASES, sh_mem_bases);
+ /* APE1 no longer exists on GFX9 */
+
+ kgd_gfx_v9_unlock_srbm(adev, inst);
+}
+
+int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
+ unsigned int vmid, uint32_t inst)
+{
+ /*
+ * We have to assume that there is no outstanding mapping.
+ * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
+ * a mapping is in progress or because a mapping finished
+ * and the SW cleared it.
+ * So the protocol is to always wait & clear.
+ */
+ uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
+ ATC_VMID0_PASID_MAPPING__VALID_MASK;
+
+ /*
+	 * We need to do this twice, once for GFX and once for MMHUB.
+	 * For the ATC, add 16 to the VMID for MMHUB; the IH block uses
+	 * different registers. ATC_VMID0..15 registers are separate from
+	 * ATC_VMID16..31.
+ */
+
+ WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
+ pasid_mapping);
+
+ while (!(RREG32(SOC15_REG_OFFSET(
+ ATHUB, 0,
+ mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
+ (1U << vmid)))
+ cpu_relax();
+
+ WREG32(SOC15_REG_OFFSET(ATHUB, 0,
+ mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
+ 1U << vmid);
+
+ /* Mapping vmid to pasid also for IH block */
+ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
+ pasid_mapping);
+
+ WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID16_PASID_MAPPING) + vmid,
+ pasid_mapping);
+
+ while (!(RREG32(SOC15_REG_OFFSET(
+ ATHUB, 0,
+ mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
+ (1U << (vmid + 16))))
+ cpu_relax();
+
+ WREG32(SOC15_REG_OFFSET(ATHUB, 0,
+ mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
+ 1U << (vmid + 16));
+
+ /* Mapping vmid to pasid also for IH block */
+ WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid,
+ pasid_mapping);
+ return 0;
+}
+
+/* TODO - RING0 form of field is obsolete, seems to date back to SI
+ * but still works
+ */
+
+int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t inst)
+{
+ uint32_t mec;
+ uint32_t pipe;
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ kgd_gfx_v9_lock_srbm(adev, mec, pipe, 0, 0, inst);
+
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmCPC_INT_CNTL,
+ CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+ CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
+
+ kgd_gfx_v9_unlock_srbm(adev, inst);
+
+ return 0;
+}
+
+static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
+ unsigned int engine_id,
+ unsigned int queue_id)
+{
+ uint32_t sdma_engine_reg_base = 0;
+ uint32_t sdma_rlc_reg_offset;
+
+ switch (engine_id) {
+ default:
+ dev_warn(adev->dev,
+ "Invalid sdma engine id (%d), using engine id 0\n",
+ engine_id);
+ fallthrough;
+ case 0:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+ mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ case 1:
+ sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+ mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
+ break;
+ }
+
+ sdma_rlc_reg_offset = sdma_engine_reg_base
+ + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
+
+ pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
+ queue_id, sdma_rlc_reg_offset);
+
+ return sdma_rlc_reg_offset;
+}
+
+static inline struct v9_mqd *get_mqd(void *mqd)
+{
+ return (struct v9_mqd *)mqd;
+}
+
+static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
+{
+ return (struct v9_sdma_mqd *)mqd;
+}
+
+int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t __user *wptr, uint32_t wptr_shift,
+ uint32_t wptr_mask, struct mm_struct *mm,
+ uint32_t inst)
+{
+ struct v9_mqd *m;
+ uint32_t *mqd_hqd;
+ uint32_t reg, hqd_base, data;
+
+ m = get_mqd(mqd);
+
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+
+ /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
+ mqd_hqd = &m->cp_mqd_base_addr_lo;
+ hqd_base = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_MQD_BASE_ADDR);
+
+ for (reg = hqd_base;
+ reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI); reg++)
+ WREG32_XCC(reg, mqd_hqd[reg - hqd_base], inst);
+
+ /* Activate doorbell logic before triggering WPTR poll. */
+ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
+ CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL, data);
+
+ if (wptr) {
+ /* Don't read wptr with get_user because the user
+ * context may not be accessible (if this function
+ * runs in a work queue). Instead trigger a one-shot
+ * polling read from memory in the CP. This assumes
+ * that wptr is GPU-accessible in the queue's VMID via
+ * ATC or SVM. WPTR==RPTR before starting the poll so
+ * the CP starts fetching new commands from the right
+ * place.
+ *
+ * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
+ * tricky. Assume that the queue didn't overflow. The
+ * number of valid bits in the 32-bit RPTR depends on
+ * the queue size. The remaining bits are taken from
+ * the saved 64-bit WPTR. If the WPTR wrapped, add the
+ * queue size.
+ */
+ uint32_t queue_size =
+ 2 << REG_GET_FIELD(m->cp_hqd_pq_control,
+ CP_HQD_PQ_CONTROL, QUEUE_SIZE);
+ uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
+
+ if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
+ guessed_wptr += queue_size;
+ guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
+ guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
+
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_LO,
+ lower_32_bits(guessed_wptr));
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI,
+ upper_32_bits(guessed_wptr));
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR,
+ lower_32_bits((uintptr_t)wptr));
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
+ upper_32_bits((uintptr_t)wptr));
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_PQ_WPTR_POLL_CNTL1,
+ (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
+ }
+
+ /* Start the EOP fetcher */
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_EOP_RPTR,
+ REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
+
+ data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE, data);
+
+ kgd_gfx_v9_release_queue(adev, inst);
+
+ return 0;
+}
+
+int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off, uint32_t inst)
+{
+ struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[inst].ring;
+ struct v9_mqd *m;
+ uint32_t mec, pipe;
+ int r;
+
+ m = get_mqd(mqd);
+
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+
+ mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
+ pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
+
+ pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
+ mec, pipe, queue_id);
+
+ spin_lock(&adev->gfx.kiq[inst].ring_lock);
+ r = amdgpu_ring_alloc(kiq_ring, 7);
+ if (r) {
+ pr_err("Failed to alloc KIQ (%d).\n", r);
+ goto out_unlock;
+ }
+
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
+ PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
+ PACKET3_MAP_QUEUES_QUEUE(queue_id) |
+ PACKET3_MAP_QUEUES_PIPE(pipe) |
+ PACKET3_MAP_QUEUES_ME((mec - 1)) |
+ PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
+ PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
+ PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
+ PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
+ amdgpu_ring_write(kiq_ring,
+ PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
+ amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
+ amdgpu_ring_commit(kiq_ring);
+
+out_unlock:
+ spin_unlock(&adev->gfx.kiq[inst].ring_lock);
+ kgd_gfx_v9_release_queue(adev, inst);
+
+ return r;
+}
+
+int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
+{
+ uint32_t i = 0, reg;
+#define HQD_N_REGS 56
+#define DUMP_REG(addr) do { \
+ if (WARN_ON_ONCE(i >= HQD_N_REGS)) \
+ break; \
+ (*dump)[i][0] = (addr) << 2; \
+ (*dump)[i++][1] = RREG32(addr); \
+ } while (0)
+
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+
+ for (reg = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_MQD_BASE_ADDR);
+ reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI); reg++)
+ DUMP_REG(reg);
+
+ kgd_gfx_v9_release_queue(adev, inst);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm)
+{
+ struct v9_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ unsigned long end_jiffies;
+ uint32_t data;
+ uint64_t data64;
+ uint64_t __user *wptr64 = (uint64_t __user *)wptr;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
+
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
+ m->sdmax_rlcx_doorbell_offset);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
+ ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
+ if (read_user_wptr(mm, wptr64, data64)) {
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+ lower_32_bits(data64));
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+ upper_32_bits(data64));
+ } else {
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
+ m->sdmax_rlcx_rb_rptr);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
+ m->sdmax_rlcx_rb_rptr_hi);
+ }
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
+ m->sdmax_rlcx_rb_base_hi);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
+ m->sdmax_rlcx_rb_rptr_addr_lo);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
+ m->sdmax_rlcx_rb_rptr_addr_hi);
+
+ data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
+ RB_ENABLE, 1);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
+
+ return 0;
+}
+
+static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs)
+{
+ uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
+ engine_id, queue_id);
+ uint32_t i = 0, reg;
+#undef HQD_N_REGS
+#define HQD_N_REGS (19+6+7+10)
+
+ *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
+ if (*dump == NULL)
+ return -ENOMEM;
+
+ for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
+ reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+ for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
+ reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
+ DUMP_REG(sdma_rlc_reg_offset + reg);
+
+ WARN_ON_ONCE(i != HQD_N_REGS);
+ *n_regs = i;
+
+ return 0;
+}
+
+bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t inst)
+{
+ uint32_t act;
+ bool retval = false;
+ uint32_t low, high;
+
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+ act = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
+ if (act) {
+ low = lower_32_bits(queue_address >> 8);
+ high = upper_32_bits(queue_address >> 8);
+
+ if (low == RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE) &&
+ high == RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI))
+ retval = true;
+ }
+ kgd_gfx_v9_release_queue(adev, inst);
+ return retval;
+}
+
+static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
+{
+ struct v9_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t sdma_rlc_rb_cntl;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+
+ if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
+ return true;
+
+ return false;
+}
+
+int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
+ enum kfd_preempt_type reset_type,
+ unsigned int utimeout, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t inst)
+{
+ enum hqd_dequeue_request_type type;
+ unsigned long end_jiffies;
+ uint32_t temp;
+ struct v9_mqd *m = get_mqd(mqd);
+
+ if (amdgpu_in_reset(adev))
+ return -EIO;
+
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+
+ if (m->cp_hqd_vmid == 0)
+ WREG32_FIELD15_RLC(GC, GET_INST(GC, inst), RLC_CP_SCHEDULERS, scheduler1, 0);
+
+ switch (reset_type) {
+ case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
+ type = DRAIN_PIPE;
+ break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
+ type = RESET_WAVES;
+ break;
+ case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
+ type = SAVE_WAVES;
+ break;
+ default:
+ type = DRAIN_PIPE;
+ break;
+ }
+
+ WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_DEQUEUE_REQUEST, type);
+
+ end_jiffies = (utimeout * HZ / 1000) + jiffies;
+ while (true) {
+ temp = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("cp queue preemption time out.\n");
+ kgd_gfx_v9_release_queue(adev, inst);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ kgd_gfx_v9_release_queue(adev, inst);
+ return 0;
+}
+
+static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
+ unsigned int utimeout)
+{
+ struct v9_sdma_mqd *m;
+ uint32_t sdma_rlc_reg_offset;
+ uint32_t temp;
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
+
+ m = get_sdma_mqd(mqd);
+ sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
+ m->sdma_queue_id);
+
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
+ temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
+
+ while (true) {
+ temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies)) {
+ pr_err("SDMA RLC not idle in %s\n", __func__);
+ return -ETIME;
+ }
+ usleep_range(500, 1000);
+ }
+
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
+ WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
+ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
+
+ m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
+ m->sdmax_rlcx_rb_rptr_hi =
+ RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
+
+ return 0;
+}
+
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
+ uint8_t vmid, uint16_t *p_pasid)
+{
+ uint32_t value;
+
+ value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
+ + vmid);
+ *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
+
+ return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
+}
+
+int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
+ uint32_t gfx_index_val,
+ uint32_t sq_cmd, uint32_t inst)
+{
+ uint32_t data = 0;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ WREG32_SOC15_RLC_SHADOW(GC, GET_INST(GC, inst), mmGRBM_GFX_INDEX, gfx_index_val);
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_CMD, sq_cmd);
+
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ INSTANCE_BROADCAST_WRITES, 1);
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SH_BROADCAST_WRITES, 1);
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
+ SE_BROADCAST_WRITES, 1);
+
+ WREG32_SOC15_RLC_SHADOW(GC, GET_INST(GC, inst), mmGRBM_GFX_INDEX, data);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+/*
+ * GFX9 helper for wave launch stall requirements on debug trap setting.
+ *
+ * vmid:
+ * Target VMID to stall/unstall.
+ *
+ * stall:
+ * 0-unstall wave launch (enable), 1-stall wave launch (disable).
+ * After wavefront launch has been stalled, allocated waves must drain from
+ * SPI in order for debug trap settings to take effect on those waves.
+ * This is roughly a ~96 clock cycle wait on SPI where a read on
+ * SPI_GDBG_WAVE_CNTL translates to ~32 clock cycles.
+ * KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY indicates the number of reads required.
+ *
+ * NOTE: We can afford to clear the entire STALL_VMID field on unstall
+ * because GFX9.4.1 cannot support multi-process debugging due to trap
+ * configuration and masking being limited to global scope. Always assume
+ * single process conditions.
+ */
+#define KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY 3
+void kgd_gfx_v9_set_wave_launch_stall(struct amdgpu_device *adev,
+ uint32_t vmid,
+ bool stall)
+{
+ int i;
+ uint32_t data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1))
+ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_VMID,
+ stall ? 1 << vmid : 0);
+ else
+ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA,
+ stall ? 1 : 0);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
+
+ if (!stall)
+ return;
+
+ for (i = 0; i < KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY; i++)
+ RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
+}
+
+/*
+ * restore_dbg_registers is ignored here, but it is a general interface
+ * requirement for devices that support GFXOFF and whose RLC save/restore list
+ * does not cover the debug registers; i.e. the driver has to manually
+ * initialize the debug-mode registers after it has disabled GFXOFF during
+ * the debug session.
+ */
+uint32_t kgd_gfx_v9_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid)
+{
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+/*
+ * keep_trap_enabled is ignored here, but it is a general interface requirement
+ * for devices that support multi-process debugging, where the performance
+ * overhead of the temporary trap setup needs to be bypassed once the debug
+ * session has ended.
+ */
+uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid)
+{
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
+
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+int kgd_gfx_v9_validate_trap_override_request(struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported)
+{
+ *trap_mask_supported &= KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH;
+
+ /* The SPI_GDBG_TRAP_MASK register is global and affects all
+ * processes. Only allow OR-ing the address-watch bit, since
+ * this only affects processes under the debugger. Other bits
+ * should stay 0 to avoid the debugger interfering with other
+ * processes.
+ */
+ if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR)
+ return -EINVAL;
+
+ return 0;
+}
+
+uint32_t kgd_gfx_v9_set_wave_launch_trap_override(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_cntl_prev)
+{
+ uint32_t data, wave_cntl_prev;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ wave_cntl_prev = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
+
+ data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK));
+ *trap_mask_prev = REG_GET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN);
+
+ trap_mask_bits = (trap_mask_bits & trap_mask_request) |
+ (*trap_mask_prev & ~trap_mask_request);
+
+ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN, trap_mask_bits);
+ data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, REPLACE, trap_override);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);
+
+ /* We need to preserve wave launch mode stall settings. */
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), wave_cntl_prev);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+uint32_t kgd_gfx_v9_set_wave_launch_mode(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid)
+{
+ uint32_t data = 0;
+ bool is_mode_set = !!wave_launch_mode;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
+
+ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
+ VMID_MASK, is_mode_set ? 1 << vmid : 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
+ MODE, is_mode_set ? wave_launch_mode : 0);
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data);
+
+ kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
+
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+#define TCP_WATCH_STRIDE (mmTCP_WATCH1_ADDR_H - mmTCP_WATCH0_ADDR_H)
+uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst)
+{
+ uint32_t watch_address_high;
+ uint32_t watch_address_low;
+ uint32_t watch_address_cntl;
+
+ watch_address_cntl = 0;
+
+ watch_address_low = lower_32_bits(watch_address);
+ watch_address_high = upper_32_bits(watch_address) & 0xffff;
+
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VMID,
+ debug_vmid);
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MODE,
+ watch_mode);
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ MASK,
+ watch_address_mask >> 6);
+
+ /* Turning off this watch point until we set all the registers */
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VALID,
+ 0);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_cntl);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_high);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_L) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_low);
+
+ /* Enable the watch point */
+ watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
+ TCP_WATCH0_CNTL,
+ VALID,
+ 1);
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_cntl);
+
+ return 0;
+}
+
+uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev,
+ uint32_t watch_id)
+{
+ uint32_t watch_address_cntl;
+
+ watch_address_cntl = 0;
+
+ WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
+ (watch_id * TCP_WATCH_STRIDE)),
+ watch_address_cntl);
+
+ return 0;
+}
+
+/* kgd_gfx_v9_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values
+ * The values read are:
+ * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads.
+ * atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads.
+ * wrm_offload_wait_time -- Wait Count for WAIT_REG_MEM Offloads.
+ * gws_wait_time -- Wait Count for Global Wave Syncs.
+ * que_sleep_wait_time     -- Wait Count for Queue Sleep.
+ * sch_wave_wait_time -- Wait Count for Scheduling Wave Message.
+ * sem_rearm_wait_time -- Wait Count for Semaphore re-arm.
+ * deq_retry_wait_time     -- Wait Count for Dequeue Retry.
+ */
+void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
+ uint32_t *wait_times,
+ uint32_t inst)
+{
+ *wait_times = RREG32_SOC15_RLC(GC, GET_INST(GC, inst),
+ mmCP_IQ_WAIT_TIME2);
+}
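
As a hedged illustration of how the packed value read above can be consumed, the generated register field macros for CP_IQ_WAIT_TIME2 let callers pull out or override individual wait counts with REG_GET_FIELD()/REG_SET_FIELD(). The helper below is hypothetical and only sketches that usage; it is not part of this patch.

/* Illustrative sketch only: override the SCH_WAVE wait count inside the packed
 * CP_IQ_WAIT_TIME2 value returned by kgd_gfx_v9_get_iq_wait_times().
 * example_override_sch_wave() is a hypothetical helper name.
 */
static uint32_t example_override_sch_wave(uint32_t wait_times, uint32_t grace_period)
{
	/* The CP treats 0 here as an effectively unbounded grace period, so clamp to 1. */
	if (grace_period == 0)
		grace_period = 1;

	return REG_SET_FIELD(wait_times, CP_IQ_WAIT_TIME2, SCH_WAVE, grace_period);
}
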
+
+void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t page_table_base)
+{
+ if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
+ pr_err("trying to set page table base for wrong VMID %u\n",
+ vmid);
+ return;
+ }
+
+ adev->mmhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
+
+ adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
+}
+
+static void lock_spi_csq_mutexes(struct amdgpu_device *adev)
+{
+ mutex_lock(&adev->srbm_mutex);
+ mutex_lock(&adev->grbm_idx_mutex);
+}
+
+static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
+{
+ mutex_unlock(&adev->grbm_idx_mutex);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
+/**
+ * get_wave_count: Read device registers to get number of waves in flight for
+ * a particular queue. The method also records the doorbell offset of the
+ * queue when a non-zero wave count is found.
+ *
+ * @adev: Handle of device whose registers are to be read
+ * @queue_idx: Index of queue in the queue-map bit-field
+ * @queue_cnt: Stores the wave count and doorbell offset for an active queue
+ * @inst: xcc's instance number on a multi-XCC setup
+ */
+static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
+ struct kfd_cu_occupancy *queue_cnt, uint32_t inst)
+{
+ int pipe_idx;
+ int queue_slot;
+ unsigned int reg_val;
+ unsigned int wave_cnt;
+ /*
+ * Program GRBM with appropriate MEID, PIPEID, QUEUEID and VMID
+ * parameters to read out waves in flight. Record the doorbell offset
+ * if there are non-zero waves in flight.
+ */
+ pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
+ queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
+ soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, GET_INST(GC, inst));
+ reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+ mmSPI_CSQ_WF_ACTIVE_COUNT_0) + queue_slot);
+ wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
+ if (wave_cnt != 0) {
+ queue_cnt->wave_cnt += wave_cnt;
+ queue_cnt->doorbell_off =
+ (RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL) &
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK) >>
+ CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+ }
+}
+
+/**
+ * kgd_gfx_v9_get_cu_occupancy: Reads relevant registers associated with each
+ * shader engine and collects the number of waves that are in flight for each
+ * active queue, along with the queue's doorbell offset. A process could have
+ * ZERO or more queues running and submitting waves to compute units.
+ *
+ * @adev: Handle of device from which to get number of waves in flight
+ * @cu_occupancy: Array that gets filled with wave_cnt and doorbell offset
+ * for comparison later.
+ * @max_waves_per_cu: Output parameter updated with maximum number of waves
+ * possible per Compute Unit
+ * @inst: xcc's instance number on a multi-XCC setup
+ *
+ * Note: It's possible that the device has too many queues (oversubscription)
+ * in which case a VMID could be remapped to a different PASID. This could lead
+ * to an inaccurate wave count. Following is a high-level sequence:
+ * Time T1: vmid = getVmid(); vmid is associated with Pasid P1
+ * Time T2: pasid = getPasid(vmid); vmid is now associated with Pasid P2
+ * In the sequence above, the wave count obtained at time T1 will be incorrectly
+ * lost from, or added to, the total wave count.
+ *
+ * The registers that provide the waves in flight are:
+ *
+ * SPI_CSQ_WF_ACTIVE_STATUS - bit-map of queues per pipe. The bit is ON if a
+ * queue is slotted, OFF if there is no queue. A process could have ZERO or
+ * more queues slotted and submitting waves to be run on compute units. Even
+ * when there is a queue, it is possible there are zero wavefronts; this
+ * can happen when the queue is waiting on top-of-pipe events - e.g. a waitRegMem
+ * command
+ *
+ * For each bit that is ON from above:
+ *
+ * Read (SPI_CSQ_WF_ACTIVE_COUNT_0 + queue_idx) register. It provides the
+ * number of waves that are in flight for the queue at specified index. The
+ * index ranges from 0 to 7.
+ *
+ * If non-zero waves are in flight, store the corresponding doorbell offset
+ * of the queue, along with the wave count.
+ *
+ * Determine if the queue belongs to the process by comparing the doorbell
+ * offset against the process's queues. If it matches, aggregate the wave
+ * count for the process.
+ *
+ * Reading registers referenced above involves programming GRBM appropriately
+ */
+void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev,
+ struct kfd_cu_occupancy *cu_occupancy,
+ int *max_waves_per_cu, uint32_t inst)
+{
+ int qidx;
+ int se_idx;
+ int se_cnt;
+ int queue_map;
+ int max_queue_cnt;
+ DECLARE_BITMAP(cp_queue_bitmap, AMDGPU_MAX_QUEUES);
+
+ lock_spi_csq_mutexes(adev);
+ soc15_grbm_select(adev, 1, 0, 0, 0, GET_INST(GC, inst));
+
+ /*
+ * Iterate through the shader engines and arrays of the device
+ * to get number of waves in flight
+ */
+ bitmap_complement(cp_queue_bitmap, adev->gfx.mec_bitmap[0].queue_bitmap,
+ AMDGPU_MAX_QUEUES);
+ max_queue_cnt = adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe;
+ se_cnt = adev->gfx.config.max_shader_engines;
+ for (se_idx = 0; se_idx < se_cnt; se_idx++) {
+ amdgpu_gfx_select_se_sh(adev, se_idx, 0, 0xffffffff, inst);
+ queue_map = RREG32_SOC15(GC, GET_INST(GC, inst), mmSPI_CSQ_WF_ACTIVE_STATUS);
+
+ /*
+ * Assumption: the queue map encodes the following schema: four
+ * pipes per micro-engine, with each pipe mapping
+ * eight queues. This schema is true for GFX9 devices
+ * and must be verified for newer device families
+ */
+ for (qidx = 0; qidx < max_queue_cnt; qidx++) {
+ /* Skip queues that are not associated with
+ * compute functions
+ */
+ if (!test_bit(qidx, cp_queue_bitmap))
+ continue;
+
+ if (!(queue_map & (1 << qidx)))
+ continue;
+
+ /* Get number of waves in flight and aggregate them */
+ get_wave_count(adev, qidx, &cu_occupancy[qidx],
+ inst);
+ }
+ }
+
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, inst);
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, inst));
+ unlock_spi_csq_mutexes(adev);
+
+ /* Update the output parameters and return */
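+ /* As an illustration, a GFX9 CU typically has 4 SIMDs with up to 10
+ * waves per SIMD, which would give 40 waves per CU.
+ */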
+ *max_waves_per_cu = adev->gfx.cu_info.simd_per_cu *
+ adev->gfx.cu_info.max_waves_per_simd;
+}
+
+void kgd_gfx_v9_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
+ uint32_t wait_times,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
+ uint32_t *reg_offset,
+ uint32_t *reg_data)
+{
+ *reg_data = wait_times;
+
+ if (sch_wave)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ SCH_WAVE,
+ sch_wave);
+ if (que_sleep)
+ *reg_data = REG_SET_FIELD(*reg_data,
+ CP_IQ_WAIT_TIME2,
+ QUE_SLEEP,
+ que_sleep);
+
+ *reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
+}
+
+void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst)
+{
+ kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst);
+
+ /*
+ * Program TBA registers
+ */
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_LO,
+ lower_32_bits(tba_addr >> 8));
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_HI,
+ upper_32_bits(tba_addr >> 8));
+
+ /*
+ * Program TMA registers
+ */
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TMA_LO,
+ lower_32_bits(tma_addr >> 8));
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TMA_HI,
+ upper_32_bits(tma_addr >> 8));
+
+ kgd_gfx_v9_unlock_srbm(adev, inst);
+}
+
+uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst)
+{
+ uint32_t low, high;
+ uint64_t queue_addr = 0;
+
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
+
+ if (!RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE))
+ goto unlock_out;
+
+ low = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE);
+ high = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI);
+
+ /* only concerned with user queues. */
+ if (!high)
+ goto unlock_out;
+
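+ /* The HQD PQ base registers hold the ring address in 256-byte units,
+ * so shift the reassembled value left by 8 to recover the byte address.
+ */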
+ queue_addr = (((queue_addr | high) << 32) | low) << 8;
+
+unlock_out:
+ amdgpu_gfx_rlc_exit_safe_mode(adev, inst);
+ kgd_gfx_v9_release_queue(adev, inst);
+
+ return queue_addr;
+}
+
+/* assume queue acquired */
+static int kgd_gfx_v9_hqd_dequeue_wait(struct amdgpu_device *adev, uint32_t inst,
+ unsigned int utimeout)
+{
+ unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
+
+ while (true) {
+ uint32_t temp = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
+
+ if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
+ return 0;
+
+ if (time_after(jiffies, end_jiffies))
+ return -ETIME;
+
+ usleep_range(500, 1000);
+ }
+}
+
+uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t inst, unsigned int utimeout)
+{
+ uint32_t low, high, pipe_reset_data = 0;
+ uint64_t queue_addr = 0;
+
+ kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
+
+ if (!RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE))
+ goto unlock_out;
+
+ low = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE);
+ high = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI);
+
+ /* only concerned with user queues. */
+ if (!high)
+ goto unlock_out;
+
+ queue_addr = (((queue_addr | high) << 32) | low) << 8;
+
+ pr_debug("Attempting queue reset on XCC %i pipe id %i queue id %i\n",
+ inst, pipe_id, queue_id);
+
+ /* assume the previously issued dequeue request will take effect after reset */
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+ if (!kgd_gfx_v9_hqd_dequeue_wait(adev, inst, utimeout))
+ goto unlock_out;
+
+ pr_debug("Attempting pipe reset on XCC %i pipe id %i\n", inst, pipe_id);
+
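+ /* CP_MEC_CNTL has one reset bit per ME1 pipe; shift the PIPE0 reset
+ * bit left by pipe_id to target the pipe being reset.
+ */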
+ pipe_reset_data = REG_SET_FIELD(pipe_reset_data, CP_MEC_CNTL, MEC_ME1_PIPE0_RESET, 1);
+ pipe_reset_data = pipe_reset_data << pipe_id;
+
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_MEC_CNTL, pipe_reset_data);
+ WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_MEC_CNTL, 0);
+
+ if (kgd_gfx_v9_hqd_dequeue_wait(adev, inst, utimeout))
+ queue_addr = 0;
+
+unlock_out:
+ pr_debug("queue reset on XCC %i pipe id %i queue id %i %s\n",
+ inst, pipe_id, queue_id, !!queue_addr ? "succeeded!" : "failed!");
+ amdgpu_gfx_rlc_exit_safe_mode(adev, inst);
+ kgd_gfx_v9_release_queue(adev, inst);
+
+ return queue_addr;
+}
+
+uint32_t kgd_gfx_v9_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue)
+{
+ return 0;
+}
+
+const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
+ .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_gfx_v9_init_interrupts,
+ .hqd_load = kgd_gfx_v9_hqd_load,
+ .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
+ .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_dump = kgd_gfx_v9_hqd_dump,
+ .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_gfx_v9_hqd_destroy,
+ .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .wave_control_execute = kgd_gfx_v9_wave_control_execute,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
+ .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+ .enable_debug_trap = kgd_gfx_v9_enable_debug_trap,
+ .disable_debug_trap = kgd_gfx_v9_disable_debug_trap,
+ .validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request,
+ .set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override,
+ .set_wave_launch_mode = kgd_gfx_v9_set_wave_launch_mode,
+ .set_address_watch = kgd_gfx_v9_set_address_watch,
+ .clear_address_watch = kgd_gfx_v9_clear_address_watch,
+ .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
+ .build_dequeue_wait_counts_packet_info = kgd_gfx_v9_build_dequeue_wait_counts_packet_info,
+ .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
+ .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
+ .hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
+ .hqd_reset = kgd_gfx_v9_hqd_reset,
+ .hqd_sdma_get_doorbell = kgd_gfx_v9_hqd_sdma_get_doorbell
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
new file mode 100644
index 000000000000..704452ca62f8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
+ uint32_t sh_mem_config,
+ uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
+ uint32_t sh_mem_bases, uint32_t inst);
+int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
+ unsigned int vmid, uint32_t inst);
+int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t inst);
+int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t __user *wptr,
+ uint32_t wptr_shift, uint32_t wptr_mask,
+ struct mm_struct *mm, uint32_t inst);
+int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t doorbell_off, uint32_t inst);
+int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst);
+bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev,
+ uint64_t queue_address, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t inst);
+int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
+ enum kfd_preempt_type reset_type,
+ unsigned int utimeout, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t inst);
+int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
+ uint32_t gfx_index_val,
+ uint32_t sq_cmd, uint32_t inst);
+bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
+ uint8_t vmid, uint16_t *p_pasid);
+void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t page_table_base);
+void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev,
+ struct kfd_cu_occupancy *cu_occupancy,
+ int *max_waves_per_cu, uint32_t inst);
+void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
+ uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr,
+ uint32_t inst);
+void kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t inst);
+uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev,
+ uint32_t pipe_id, uint32_t queue_id);
+void kgd_gfx_v9_release_queue(struct amdgpu_device *adev, uint32_t inst);
+void kgd_gfx_v9_set_wave_launch_stall(struct amdgpu_device *adev,
+ uint32_t vmid,
+ bool stall);
+uint32_t kgd_gfx_v9_enable_debug_trap(struct amdgpu_device *adev,
+ bool restore_dbg_registers,
+ uint32_t vmid);
+uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev,
+ bool keep_trap_enabled,
+ uint32_t vmid);
+int kgd_gfx_v9_validate_trap_override_request(struct amdgpu_device *adev,
+ uint32_t trap_override,
+ uint32_t *trap_mask_supported);
+uint32_t kgd_gfx_v9_set_wave_launch_mode(struct amdgpu_device *adev,
+ uint8_t wave_launch_mode,
+ uint32_t vmid);
+uint32_t kgd_gfx_v9_set_wave_launch_trap_override(struct amdgpu_device *adev,
+ uint32_t vmid,
+ uint32_t trap_override,
+ uint32_t trap_mask_bits,
+ uint32_t trap_mask_request,
+ uint32_t *trap_mask_prev,
+ uint32_t kfd_dbg_trap_cntl_prev);
+uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev,
+ uint64_t watch_address,
+ uint32_t watch_address_mask,
+ uint32_t watch_id,
+ uint32_t watch_mode,
+ uint32_t debug_vmid,
+ uint32_t inst);
+uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev,
+ uint32_t watch_id);
+void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
+ uint32_t *wait_times,
+ uint32_t inst);
+void kgd_gfx_v9_build_dequeue_wait_counts_packet_info(struct amdgpu_device *adev,
+ uint32_t wait_times,
+ uint32_t sch_wave,
+ uint32_t que_sleep,
+ uint32_t *reg_offset,
+ uint32_t *reg_data);
+uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst);
+uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
+ uint32_t pipe_id,
+ uint32_t queue_id,
+ uint32_t inst,
+ unsigned int utimeout);
+uint32_t kgd_gfx_v9_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+ int engine, int queue);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
new file mode 100644
index 000000000000..83020963dfde
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -0,0 +1,3239 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2014-2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <linux/dma-buf.h>
+#include <linux/list.h>
+#include <linux/pagemap.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/task.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include <drm/drm_exec.h>
+
+#include "amdgpu_object.h"
+#include "amdgpu_gem.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_hmm.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_dma_buf.h"
+#include <uapi/linux/kfd_ioctl.h>
+#include "amdgpu_xgmi.h"
+#include "kfd_priv.h"
+#include "kfd_smi_events.h"
+
+/* Userptr restore delay, just long enough to allow consecutive VM
+ * changes to accumulate
+ */
+#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1
+#define AMDGPU_RESERVE_MEM_LIMIT (3UL << 29)
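+/* 3UL << 29 bytes = 1.5 GiB kept in reserve when sizing the system memory limit */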
+
+/*
+ * Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
+ * BO chunk
+ */
+#define VRAM_AVAILABLITY_ALIGN (1 << 21)
+
+/* Impose limit on how much memory KFD can use */
+static struct {
+ uint64_t max_system_mem_limit;
+ uint64_t max_ttm_mem_limit;
+ int64_t system_mem_used;
+ int64_t ttm_mem_used;
+ spinlock_t mem_limit_lock;
+} kfd_mem_limit;
+
+static const char * const domain_bit_to_string[] = {
+ "CPU",
+ "GTT",
+ "VRAM",
+ "GDS",
+ "GWS",
+ "OA"
+};
+
+#define domain_string(domain) domain_bit_to_string[ffs(domain)-1]
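+/* e.g. ffs(AMDGPU_GEM_DOMAIN_VRAM) - 1 == 2, so domain_string() yields "VRAM" */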
+
+static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work);
+
+static bool kfd_mem_is_attached(struct amdgpu_vm *avm,
+ struct kgd_mem *mem)
+{
+ struct kfd_mem_attachment *entry;
+
+ list_for_each_entry(entry, &mem->attachments, list)
+ if (entry->bo_va->base.vm == avm)
+ return true;
+
+ return false;
+}
+
+/**
+ * reuse_dmamap() - Check whether adev can share the original
+ * userptr BO
+ *
+ * If both adev and bo_adev are in direct mapping or
+ * in the same iommu group, they can share the original BO.
+ *
+ * @adev: Device that may or may not share the original BO
+ * @bo_adev: Device to which the allocated BO belongs
+ *
+ * Return: returns true if adev can share original userptr BO,
+ * false otherwise.
+ */
+static bool reuse_dmamap(struct amdgpu_device *adev, struct amdgpu_device *bo_adev)
+{
+ return (adev->ram_is_direct_mapped && bo_adev->ram_is_direct_mapped) ||
+ (adev->dev->iommu_group == bo_adev->dev->iommu_group);
+}
+
+/* Set memory usage limits. Currently the limits are:
+ * System (TTM + userptr) memory - roughly 63/64 of system RAM, less a fixed reserve
+ * TTM memory - the TTM pages limit (ttm_tt_pages_limit())
+ */
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
+{
+ struct sysinfo si;
+ uint64_t mem;
+
+ if (kfd_mem_limit.max_system_mem_limit)
+ return;
+
+ si_meminfo(&si);
+ mem = si.totalram - si.totalhigh;
+ mem *= si.mem_unit;
+
+ spin_lock_init(&kfd_mem_limit.mem_limit_lock);
+ kfd_mem_limit.max_system_mem_limit = mem - (mem >> 6);
+ if (kfd_mem_limit.max_system_mem_limit < 2 * AMDGPU_RESERVE_MEM_LIMIT)
+ kfd_mem_limit.max_system_mem_limit >>= 1;
+ else
+ kfd_mem_limit.max_system_mem_limit -= AMDGPU_RESERVE_MEM_LIMIT;
+
+ kfd_mem_limit.max_ttm_mem_limit = ttm_tt_pages_limit() << PAGE_SHIFT;
+ pr_debug("Kernel memory limit %lluM, TTM limit %lluM\n",
+ (kfd_mem_limit.max_system_mem_limit >> 20),
+ (kfd_mem_limit.max_ttm_mem_limit >> 20));
+}
+
+void amdgpu_amdkfd_reserve_system_mem(uint64_t size)
+{
+ kfd_mem_limit.system_mem_used += size;
+}
+
+/* Estimate page table size needed to represent a given memory size
+ *
+ * With 4KB pages, we need one 8 byte PTE for each 4KB of memory
+ * (factor 512, >> 9). With 2MB pages, we need one 8 byte PTE for 2MB
+ * of memory (factor 256K, >> 18). ROCm user mode tries to optimize
+ * for 2MB pages for TLB efficiency. However, small allocations and
+ * fragmented system memory still need some 4KB pages. We choose a
+ * compromise that should work in most cases without reserving too
+ * much memory for page tables unnecessarily (factor 16K, >> 14).
+ */
+
+#define ESTIMATE_PT_SIZE(mem_size) max(((mem_size) >> 14), AMDGPU_VM_RESERVED_VRAM)
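+/* For example, managing 64 GiB of memory would reserve 64 GiB >> 14 = 4 MiB
+ * for page tables (never less than AMDGPU_VM_RESERVED_VRAM).
+ */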
+
+/**
+ * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by size
+ * of buffer.
+ *
+ * @adev: Device to which allocated BO belongs to
+ * @size: Size of buffer, in bytes, encapsulated by BO. This should be
+ * equivalent to amdgpu_bo_size(BO)
+ * @alloc_flag: Flag used in allocating a BO as noted above
+ * @xcp_id: xcp_id is used to get xcp from xcp manager, one xcp is
+ * managed as one compute node in driver for app
+ *
+ * Return:
+ * returns -ENOMEM in case of error, ZERO otherwise
+ */
+int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 alloc_flag, int8_t xcp_id)
+{
+ uint64_t reserved_for_pt =
+ ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ uint64_t reserved_for_ras = (con ? con->reserved_pages_in_bytes : 0);
+ size_t system_mem_needed, ttm_mem_needed, vram_needed;
+ int ret = 0;
+ uint64_t vram_size = 0;
+
+ system_mem_needed = 0;
+ ttm_mem_needed = 0;
+ vram_needed = 0;
+ if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
+ system_mem_needed = size;
+ ttm_mem_needed = size;
+ } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+ /*
+ * Conservatively round up the allocation requirement to 2 MB
+ * to avoid fragmentation caused by 4K allocations in the tail
+ * 2M BO chunk.
+ */
+ vram_needed = size;
+ /*
+ * For GFX 9.4.3, get the VRAM size from XCP structs
+ */
+ if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
+ return -EINVAL;
+
+ vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
+ if (adev->apu_prefer_gtt) {
+ system_mem_needed = size;
+ ttm_mem_needed = size;
+ }
+ } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
+ system_mem_needed = size;
+ } else if (!(alloc_flag &
+ (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
+ pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
+ return -ENOMEM;
+ }
+
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+
+ if (kfd_mem_limit.system_mem_used + system_mem_needed >
+ kfd_mem_limit.max_system_mem_limit) {
+ pr_debug("Set no_system_mem_limit=1 if using shared memory\n");
+ if (!no_system_mem_limit) {
+ ret = -ENOMEM;
+ goto release;
+ }
+ }
+
+ if (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
+ kfd_mem_limit.max_ttm_mem_limit) {
+ ret = -ENOMEM;
+ goto release;
+ }
+
+ /* If is_app_apu is false and apu_prefer_gtt is true, it is an APU with
+ * carveout < GTT. In that case, VRAM allocations go to the GTT domain; skip
+ * the VRAM check since the ttm_mem_limit check already covers this allocation.
+ */
+
+ if (adev && xcp_id >= 0 && (!adev->apu_prefer_gtt || adev->gmc.is_app_apu)) {
+ uint64_t vram_available =
+ vram_size - reserved_for_pt - reserved_for_ras -
+ atomic64_read(&adev->vram_pin_size);
+ if (adev->kfd.vram_used[xcp_id] + vram_needed > vram_available) {
+ ret = -ENOMEM;
+ goto release;
+ }
+ }
+
+ /* Update memory accounting by decreasing available system
+ * memory, TTM memory and GPU memory as computed above
+ */
+ WARN_ONCE(vram_needed && !adev,
+ "adev reference can't be null when vram is used");
+ if (adev && xcp_id >= 0) {
+ adev->kfd.vram_used[xcp_id] += vram_needed;
+ adev->kfd.vram_used_aligned[xcp_id] +=
+ adev->apu_prefer_gtt ?
+ vram_needed :
+ ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
+ }
+ kfd_mem_limit.system_mem_used += system_mem_needed;
+ kfd_mem_limit.ttm_mem_used += ttm_mem_needed;
+
+release:
+ spin_unlock(&kfd_mem_limit.mem_limit_lock);
+ return ret;
+}
+
+void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
+ uint64_t size, u32 alloc_flag, int8_t xcp_id)
+{
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+
+ if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
+ kfd_mem_limit.system_mem_used -= size;
+ kfd_mem_limit.ttm_mem_used -= size;
+ } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+ WARN_ONCE(!adev,
+ "adev reference can't be null when alloc mem flags vram is set");
+ if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id))
+ goto release;
+
+ if (adev) {
+ adev->kfd.vram_used[xcp_id] -= size;
+ if (adev->apu_prefer_gtt) {
+ adev->kfd.vram_used_aligned[xcp_id] -= size;
+ kfd_mem_limit.system_mem_used -= size;
+ kfd_mem_limit.ttm_mem_used -= size;
+ } else {
+ adev->kfd.vram_used_aligned[xcp_id] -=
+ ALIGN(size, VRAM_AVAILABLITY_ALIGN);
+ }
+ }
+ } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
+ kfd_mem_limit.system_mem_used -= size;
+ } else if (!(alloc_flag &
+ (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
+ pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag);
+ goto release;
+ }
+ WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0,
+ "KFD VRAM memory accounting unbalanced for xcp: %d", xcp_id);
+ WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0,
+ "KFD TTM memory accounting unbalanced");
+ WARN_ONCE(kfd_mem_limit.system_mem_used < 0,
+ "KFD system memory accounting unbalanced");
+
+release:
+ spin_unlock(&kfd_mem_limit.mem_limit_lock);
+}
+
+void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ u32 alloc_flags = bo->kfd_bo->alloc_flags;
+ u64 size = amdgpu_bo_size(bo);
+
+ amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags,
+ bo->xcp_id);
+
+ kfree(bo->kfd_bo);
+}
+
+/**
+ * create_dmamap_sg_bo() - Creates a amdgpu_bo object to reflect information
+ * about a USERPTR or DOORBELL or MMIO BO.
+ *
+ * @adev: Device for which dmamap BO is being created
+ * @mem: BO of peer device that is being DMA mapped. Provides parameters
+ * in building the dmamap BO
+ * @bo_out: Output parameter updated with handle of dmamap BO
+ */
+static int
+create_dmamap_sg_bo(struct amdgpu_device *adev,
+ struct kgd_mem *mem, struct amdgpu_bo **bo_out)
+{
+ struct drm_gem_object *gem_obj;
+ int ret;
+ uint64_t flags = 0;
+
+ ret = amdgpu_bo_reserve(mem->bo, false);
+ if (ret)
+ return ret;
+
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)
+ flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
+ AMDGPU_GEM_CREATE_UNCACHED);
+
+ ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
+ AMDGPU_GEM_DOMAIN_CPU, AMDGPU_GEM_CREATE_PREEMPTIBLE | flags,
+ ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0);
+
+ amdgpu_bo_unreserve(mem->bo);
+
+ if (ret) {
+ pr_err("Error in creating DMA mappable SG BO: %d\n", ret);
+ return -EINVAL;
+ }
+
+ *bo_out = gem_to_amdgpu_bo(gem_obj);
+ (*bo_out)->parent = amdgpu_bo_ref(mem->bo);
+ return ret;
+}
+
+/* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's
+ * reservation object.
+ *
+ * @bo: [IN] Remove eviction fence(s) from this BO
+ * @ef: [IN] This eviction fence is removed if it
+ * is present in the shared list.
+ *
+ * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
+ */
+static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
+ struct amdgpu_amdkfd_fence *ef)
+{
+ struct dma_fence *replacement;
+
+ if (!ef)
+ return -EINVAL;
+
+ /* TODO: Instead of blocking here, we should use the fence of the page
+ * table update and TLB flush directly.
+ */
+ replacement = dma_fence_get_stub();
+ dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
+ replacement, DMA_RESV_USAGE_BOOKKEEP);
+ dma_fence_put(replacement);
+ return 0;
+}
+
+/**
+ * amdgpu_amdkfd_remove_all_eviction_fences - Remove all eviction fences
+ * @bo: the BO where to remove the evictions fences from.
+ *
+ * This function should only be used on release when all references to the BO
+ * are already dropped. We remove the eviction fence from the private copy of
+ * the dma_resv object here since that is what is used during release to
+ * determine if the BO is idle or not.
+ */
+void amdgpu_amdkfd_remove_all_eviction_fences(struct amdgpu_bo *bo)
+{
+ struct dma_resv *resv = &bo->tbo.base._resv;
+ struct dma_fence *fence, *stub;
+ struct dma_resv_iter cursor;
+
+ dma_resv_assert_held(resv);
+
+ stub = dma_fence_get_stub();
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
+ if (!to_amdgpu_amdkfd_fence(fence))
+ continue;
+
+ dma_resv_replace_fences(resv, fence->context, stub,
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+ dma_fence_put(stub);
+}
+
+static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
+ bool wait)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ int ret;
+
+ if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
+ "Called with userptr BO"))
+ return -EINVAL;
+
+ /* BO has been pinned, no need to validate it */
+ if (bo->tbo.pin_count)
+ return 0;
+
+ amdgpu_bo_placement_from_domain(bo, domain);
+
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ goto validate_fail;
+ if (wait)
+ amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
+
+validate_fail:
+ return ret;
+}
+
+int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+ uint32_t domain,
+ struct dma_fence *fence)
+{
+ int ret = amdgpu_bo_reserve(bo, false);
+
+ if (ret)
+ return ret;
+
+ ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
+ if (ret)
+ goto unreserve_out;
+
+ ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
+ if (ret)
+ goto unreserve_out;
+
+ dma_resv_add_fence(bo->tbo.base.resv, fence,
+ DMA_RESV_USAGE_BOOKKEEP);
+
+unreserve_out:
+ amdgpu_bo_unreserve(bo);
+
+ return ret;
+}
+
+static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
+{
+ return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
+}
+
+/* vm_validate_pt_pd_bos - Validate page table and directory BOs
+ *
+ * Page directories are not updated here because huge page handling
+ * during page table updates can invalidate page directory entries
+ * again. Page directories are only updated after updating page
+ * tables.
+ */
+static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm,
+ struct ww_acquire_ctx *ticket)
+{
+ struct amdgpu_bo *pd = vm->root.bo;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
+ int ret;
+
+ ret = amdgpu_vm_validate(adev, vm, ticket,
+ amdgpu_amdkfd_validate_vm_bo, NULL);
+ if (ret) {
+ pr_err("failed to validate PT BOs\n");
+ return ret;
+ }
+
+ vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
+
+ return 0;
+}
+
+static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
+{
+ struct amdgpu_bo *pd = vm->root.bo;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
+ int ret;
+
+ ret = amdgpu_vm_update_pdes(adev, vm, false);
+ if (ret)
+ return ret;
+
+ return amdgpu_sync_fence(sync, vm->last_update, GFP_KERNEL);
+}
+
+static uint64_t get_pte_flags(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct kgd_mem *mem)
+{
+ uint32_t mapping_flags = AMDGPU_VM_PAGE_READABLE |
+ AMDGPU_VM_MTYPE_DEFAULT;
+
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
+ mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
+
+ return mapping_flags;
+}
+
+/**
+ * create_sg_table() - Create an sg_table for a contiguous DMA addr range
+ * @addr: The starting address to point to
+ * @size: Size of memory area in bytes being pointed to
+ *
+ * Allocates an instance of sg_table and initializes it to point to memory
+ * area specified by the input parameters. The address used to build it is
+ * assumed to be already DMA mapped, if needed.
+ *
+ * DOORBELL or MMIO BOs use only one scatterlist node in their sg_table
+ * because they are physically contiguous.
+ *
+ * Return: Initialized instance of SG Table or NULL
+ */
+static struct sg_table *create_sg_table(uint64_t addr, uint32_t size)
+{
+ struct sg_table *sg = kmalloc(sizeof(*sg), GFP_KERNEL);
+
+ if (!sg)
+ return NULL;
+ if (sg_alloc_table(sg, 1, GFP_KERNEL)) {
+ kfree(sg);
+ return NULL;
+ }
+ sg_dma_address(sg->sgl) = addr;
+ sg->sgl->length = size;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->sgl->dma_length = size;
+#endif
+ return sg;
+}
+
+static int
+kfd_mem_dmamap_userptr(struct kgd_mem *mem,
+ struct kfd_mem_attachment *attachment)
+{
+ enum dma_data_direction direction =
+ mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ struct ttm_operation_ctx ctx = {.interruptible = true};
+ struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+ struct amdgpu_device *adev = attachment->adev;
+ struct ttm_tt *src_ttm = mem->bo->tbo.ttm;
+ struct ttm_tt *ttm = bo->tbo.ttm;
+ int ret;
+
+ if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
+ return -EINVAL;
+
+ ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
+ if (unlikely(!ttm->sg))
+ return -ENOMEM;
+
+ /* Same sequence as in amdgpu_ttm_tt_pin_userptr */
+ ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
+ ttm->num_pages, 0,
+ (u64)ttm->num_pages << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (unlikely(ret))
+ goto free_sg;
+
+ ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
+ if (unlikely(ret))
+ goto release_sg;
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ goto unmap_sg;
+
+ return 0;
+
+unmap_sg:
+ dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
+release_sg:
+ pr_err("DMA map userptr failed: %d\n", ret);
+ sg_free_table(ttm->sg);
+free_sg:
+ kfree(ttm->sg);
+ ttm->sg = NULL;
+ return ret;
+}
+
+static int
+kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment)
+{
+ struct ttm_operation_ctx ctx = {.interruptible = true};
+ struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+/**
+ * kfd_mem_dmamap_sg_bo() - Create DMA mapped sg_table to access DOORBELL or MMIO BO
+ * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
+ * @attachment: Virtual address attachment of the BO on accessing device
+ *
+ * An access request from the device that owns the DOORBELL does not require DMA mapping.
+ * This is because the request doesn't go through the PCIe root complex, i.e. it instead
+ * loops back. The need to DMA map arises only when accessing a peer device's DOORBELL
+ *
+ * In contrast, all access requests for MMIO need to be DMA mapped without regard to
+ * device ownership. This is because access requests for MMIO go through PCIe root
+ * complex.
+ *
+ * This is accomplished in two steps:
+ * - Obtain DMA mapped address of DOORBELL or MMIO memory that could be used
+ * in updating requesting device's page table
+ * - Signal TTM to mark memory pointed to by requesting device's BO as GPU
+ * accessible. This allows an update of requesting device's page table
+ * with entries associated with DOORBELL or MMIO memory
+ *
+ * This method is invoked in the following contexts:
+ * - Mapping of DOORBELL or MMIO BO of same or peer device
+ * - Validating an evicted DOORBELL or MMIO BO on a device seeking access
+ *
+ * Return: ZERO if successful, NON-ZERO otherwise
+ */
+static int
+kfd_mem_dmamap_sg_bo(struct kgd_mem *mem,
+ struct kfd_mem_attachment *attachment)
+{
+ struct ttm_operation_ctx ctx = {.interruptible = true};
+ struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+ struct amdgpu_device *adev = attachment->adev;
+ struct ttm_tt *ttm = bo->tbo.ttm;
+ enum dma_data_direction dir;
+ dma_addr_t dma_addr;
+ bool mmio;
+ int ret;
+
+ /* Expect SG Table of dmamap BO to be NULL */
+ mmio = (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP);
+ if (unlikely(ttm->sg)) {
+ pr_err("SG Table of %d BO for peer device is UNEXPECTEDLY NON-NULL", mmio);
+ return -EINVAL;
+ }
+
+ dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ dma_addr = mem->bo->tbo.sg->sgl->dma_address;
+ pr_debug("%d BO size: %d\n", mmio, mem->bo->tbo.sg->sgl->length);
+ pr_debug("%d BO address before DMA mapping: %llx\n", mmio, dma_addr);
+ dma_addr = dma_map_resource(adev->dev, dma_addr,
+ mem->bo->tbo.sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
+ ret = dma_mapping_error(adev->dev, dma_addr);
+ if (unlikely(ret))
+ return ret;
+ pr_debug("%d BO address after DMA mapping: %llx\n", mmio, dma_addr);
+
+ ttm->sg = create_sg_table(dma_addr, mem->bo->tbo.sg->sgl->length);
+ if (unlikely(!ttm->sg)) {
+ ret = -ENOMEM;
+ goto unmap_sg;
+ }
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (unlikely(ret))
+ goto free_sg;
+
+ return ret;
+
+free_sg:
+ sg_free_table(ttm->sg);
+ kfree(ttm->sg);
+ ttm->sg = NULL;
+unmap_sg:
+ dma_unmap_resource(adev->dev, dma_addr, mem->bo->tbo.sg->sgl->length,
+ dir, DMA_ATTR_SKIP_CPU_SYNC);
+ return ret;
+}
+
+static int
+kfd_mem_dmamap_attachment(struct kgd_mem *mem,
+ struct kfd_mem_attachment *attachment)
+{
+ switch (attachment->type) {
+ case KFD_MEM_ATT_SHARED:
+ return 0;
+ case KFD_MEM_ATT_USERPTR:
+ return kfd_mem_dmamap_userptr(mem, attachment);
+ case KFD_MEM_ATT_DMABUF:
+ return kfd_mem_dmamap_dmabuf(attachment);
+ case KFD_MEM_ATT_SG:
+ return kfd_mem_dmamap_sg_bo(mem, attachment);
+ default:
+ WARN_ON_ONCE(1);
+ }
+ return -EINVAL;
+}
+
+static void
+kfd_mem_dmaunmap_userptr(struct kgd_mem *mem,
+ struct kfd_mem_attachment *attachment)
+{
+ enum dma_data_direction direction =
+ mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ struct ttm_operation_ctx ctx = {.interruptible = false};
+ struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+ struct amdgpu_device *adev = attachment->adev;
+ struct ttm_tt *ttm = bo->tbo.ttm;
+
+ if (unlikely(!ttm->sg))
+ return;
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ (void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+
+ dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
+ sg_free_table(ttm->sg);
+ kfree(ttm->sg);
+ ttm->sg = NULL;
+}
+
+static void
+kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment)
+{
+ /* This is a no-op. We don't want to trigger eviction fences when
+ * unmapping DMABufs. Therefore the invalidation (moving to system
+ * domain) is done in kfd_mem_dmamap_dmabuf.
+ */
+}
+
+/**
+ * kfd_mem_dmaunmap_sg_bo() - Free DMA mapped sg_table of DOORBELL or MMIO BO
+ * @mem: SG BO of the DOORBELL or MMIO resource on the owning device
+ * @attachment: Virtual address attachment of the BO on accessing device
+ *
+ * The method performs following steps:
+ * - Signal TTM to mark memory pointed to by BO as GPU inaccessible
+ * - Free SG Table that is used to encapsulate DMA mapped memory of
+ * peer device's DOORBELL or MMIO memory
+ *
+ * This method is invoked in the following contexts:
+ * Unmapping of DOORBELL or MMIO BO on a device having access to its memory
+ * Eviction of DOORBELL or MMIO BO on a device having access to its memory
+ *
+ * Return: void
+ */
+static void
+kfd_mem_dmaunmap_sg_bo(struct kgd_mem *mem,
+ struct kfd_mem_attachment *attachment)
+{
+ struct ttm_operation_ctx ctx = {.interruptible = true};
+ struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+ struct amdgpu_device *adev = attachment->adev;
+ struct ttm_tt *ttm = bo->tbo.ttm;
+ enum dma_data_direction dir;
+
+ if (unlikely(!ttm->sg)) {
+ pr_debug("SG Table of BO is NULL");
+ return;
+ }
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ (void)ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+
+ dir = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ dma_unmap_resource(adev->dev, ttm->sg->sgl->dma_address,
+ ttm->sg->sgl->length, dir, DMA_ATTR_SKIP_CPU_SYNC);
+ sg_free_table(ttm->sg);
+ kfree(ttm->sg);
+ ttm->sg = NULL;
+ bo->tbo.sg = NULL;
+}
+
+static void
+kfd_mem_dmaunmap_attachment(struct kgd_mem *mem,
+ struct kfd_mem_attachment *attachment)
+{
+ switch (attachment->type) {
+ case KFD_MEM_ATT_SHARED:
+ break;
+ case KFD_MEM_ATT_USERPTR:
+ kfd_mem_dmaunmap_userptr(mem, attachment);
+ break;
+ case KFD_MEM_ATT_DMABUF:
+ kfd_mem_dmaunmap_dmabuf(attachment);
+ break;
+ case KFD_MEM_ATT_SG:
+ kfd_mem_dmaunmap_sg_bo(mem, attachment);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
+static int kfd_mem_export_dmabuf(struct kgd_mem *mem)
+{
+ if (!mem->dmabuf) {
+ struct amdgpu_device *bo_adev;
+ struct dma_buf *dmabuf;
+
+ bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
+ dmabuf = drm_gem_prime_handle_to_dmabuf(&bo_adev->ddev, bo_adev->kfd.client.file,
+ mem->gem_handle,
+ mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
+ DRM_RDWR : 0);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+ mem->dmabuf = dmabuf;
+ }
+
+ return 0;
+}
+
+static int
+kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
+ struct amdgpu_bo **bo)
+{
+ struct drm_gem_object *gobj;
+ int ret;
+
+ ret = kfd_mem_export_dmabuf(mem);
+ if (ret)
+ return ret;
+
+ gobj = amdgpu_gem_prime_import(adev_to_drm(adev), mem->dmabuf);
+ if (IS_ERR(gobj))
+ return PTR_ERR(gobj);
+
+ *bo = gem_to_amdgpu_bo(gobj);
+ (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
+
+ return 0;
+}
+
+/* kfd_mem_attach - Add a BO to a VM
+ *
+ * Everything that needs to be done only once when a BO is first added
+ * to a VM. It can later be mapped and unmapped many times without
+ * repeating these steps.
+ *
+ * 0. Create BO for DMA mapping, if needed
+ * 1. Allocate and initialize BO VA entry data structure
+ * 2. Add BO to the VM
+ * 3. Determine ASIC-specific PTE flags
+ * 4. Alloc page tables and directories if needed
+ * 4a. Validate new page tables and directories
+ */
+static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
+ struct amdgpu_vm *vm, bool is_aql)
+{
+ struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
+ unsigned long bo_size = mem->bo->tbo.base.size;
+ uint64_t va = mem->va;
+ struct kfd_mem_attachment *attachment[2] = {NULL, NULL};
+ struct amdgpu_bo *bo[2] = {NULL, NULL};
+ struct amdgpu_bo_va *bo_va;
+ bool same_hive = false;
+ int i, ret;
+
+ if (!va) {
+ pr_err("Invalid VA when adding BO to VM\n");
+ return -EINVAL;
+ }
+
+ /* Determine access to VRAM, MMIO and DOORBELL BOs of peer devices
+ *
+ * The access path of MMIO and DOORBELL BOs is always over PCIe.
+ * In contrast, the access path of VRAM BOs depends upon the type of
+ * link that connects the peer device. Access over PCIe is allowed
+ * if peer device has large BAR. In contrast, access over xGMI is
+ * allowed for both small and large BAR configurations of peer device
+ */
+ if ((adev != bo_adev && !adev->apu_prefer_gtt) &&
+ ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
+ (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
+ (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
+ if (mem->domain == AMDGPU_GEM_DOMAIN_VRAM)
+ same_hive = amdgpu_xgmi_same_hive(adev, bo_adev);
+ if (!same_hive && !amdgpu_device_is_peer_accessible(bo_adev, adev))
+ return -EINVAL;
+ }
+
+ for (i = 0; i <= is_aql; i++) {
+ attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
+ if (unlikely(!attachment[i])) {
+ ret = -ENOMEM;
+ goto unwind;
+ }
+
+ pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
+ va + bo_size, vm);
+
+ if ((adev == bo_adev && !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) ||
+ (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && reuse_dmamap(adev, bo_adev)) ||
+ (mem->domain == AMDGPU_GEM_DOMAIN_GTT && reuse_dmamap(adev, bo_adev)) ||
+ same_hive) {
+ /* Mappings on the local GPU, VRAM mappings in the local hive,
+ * userptrs, and GTT mappings that can reuse the DMA-mapped
+ * address space all share the original BO
+ */
+ attachment[i]->type = KFD_MEM_ATT_SHARED;
+ bo[i] = mem->bo;
+ drm_gem_object_get(&bo[i]->tbo.base);
+ } else if (i > 0) {
+ /* Multiple mappings on the same GPU share the BO */
+ attachment[i]->type = KFD_MEM_ATT_SHARED;
+ bo[i] = bo[0];
+ drm_gem_object_get(&bo[i]->tbo.base);
+ } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
+ /* Create an SG BO to DMA-map userptrs on other GPUs */
+ attachment[i]->type = KFD_MEM_ATT_USERPTR;
+ ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
+ if (ret)
+ goto unwind;
+ /* Handle DOORBELL BOs of peer devices and MMIO BOs of local and peer devices */
+ } else if (mem->bo->tbo.type == ttm_bo_type_sg) {
+ WARN_ONCE(!(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL ||
+ mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP),
+ "Handling invalid SG BO in ATTACH request");
+ attachment[i]->type = KFD_MEM_ATT_SG;
+ ret = create_dmamap_sg_bo(adev, mem, &bo[i]);
+ if (ret)
+ goto unwind;
+ /* Enable access to GTT and VRAM BOs of peer devices */
+ } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT ||
+ mem->domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ attachment[i]->type = KFD_MEM_ATT_DMABUF;
+ ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]);
+ if (ret)
+ goto unwind;
+ pr_debug("Employ DMABUF mechanism to enable peer GPU access\n");
+ } else {
+ WARN_ONCE(true, "Handling invalid ATTACH request");
+ ret = -EINVAL;
+ goto unwind;
+ }
+
+ /* Add BO to VM internal data structures */
+ ret = amdgpu_bo_reserve(bo[i], false);
+ if (ret) {
+ pr_debug("Unable to reserve BO during memory attach");
+ goto unwind;
+ }
+ bo_va = amdgpu_vm_bo_find(vm, bo[i]);
+ if (!bo_va)
+ bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
+ else
+ ++bo_va->ref_count;
+ attachment[i]->bo_va = bo_va;
+ amdgpu_bo_unreserve(bo[i]);
+ if (unlikely(!attachment[i]->bo_va)) {
+ ret = -ENOMEM;
+ pr_err("Failed to add BO object to VM. ret == %d\n",
+ ret);
+ goto unwind;
+ }
+ attachment[i]->va = va;
+ attachment[i]->pte_flags = get_pte_flags(adev, vm, mem);
+ attachment[i]->adev = adev;
+ list_add(&attachment[i]->list, &mem->attachments);
+
+ va += bo_size;
+ }
+
+ return 0;
+
+unwind:
+ for (; i >= 0; i--) {
+ if (!attachment[i])
+ continue;
+ if (attachment[i]->bo_va) {
+ (void)amdgpu_bo_reserve(bo[i], true);
+ if (--attachment[i]->bo_va->ref_count == 0)
+ amdgpu_vm_bo_del(adev, attachment[i]->bo_va);
+ amdgpu_bo_unreserve(bo[i]);
+ list_del(&attachment[i]->list);
+ }
+ if (bo[i])
+ drm_gem_object_put(&bo[i]->tbo.base);
+ kfree(attachment[i]);
+ }
+ return ret;
+}
+
+static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
+{
+ struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+
+ pr_debug("\t remove VA 0x%llx in entry %p\n",
+ attachment->va, attachment);
+ if (--attachment->bo_va->ref_count == 0)
+ amdgpu_vm_bo_del(attachment->adev, attachment->bo_va);
+ drm_gem_object_put(&bo->tbo.base);
+ list_del(&attachment->list);
+ kfree(attachment);
+}
+
+static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem,
+ struct amdkfd_process_info *process_info,
+ bool userptr)
+{
+ mutex_lock(&process_info->lock);
+ if (userptr)
+ list_add_tail(&mem->validate_list,
+ &process_info->userptr_valid_list);
+ else
+ list_add_tail(&mem->validate_list, &process_info->kfd_bo_list);
+ mutex_unlock(&process_info->lock);
+}
+
+static void remove_kgd_mem_from_kfd_bo_list(struct kgd_mem *mem,
+ struct amdkfd_process_info *process_info)
+{
+ mutex_lock(&process_info->lock);
+ list_del(&mem->validate_list);
+ mutex_unlock(&process_info->lock);
+}
+
+/* Initializes user pages. It registers the MMU notifier and validates
+ * the userptr BO in the GTT domain.
+ *
+ * The BO must already be on the userptr_valid_list. Otherwise an
+ * eviction and restore may happen that leaves the new BO unmapped
+ * with the user mode queues running.
+ *
+ * Takes the process_info->lock to protect against concurrent restore
+ * workers.
+ *
+ * Returns 0 for success, negative errno for errors.
+ */
+static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
+ bool criu_resume)
+{
+ struct amdkfd_process_info *process_info = mem->process_info;
+ struct amdgpu_bo *bo = mem->bo;
+ struct ttm_operation_ctx ctx = { true, false };
+ struct hmm_range *range;
+ int ret = 0;
+
+ mutex_lock(&process_info->lock);
+
+ ret = amdgpu_ttm_tt_set_userptr(&bo->tbo, user_addr, 0);
+ if (ret) {
+ pr_err("%s: Failed to set userptr: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = amdgpu_hmm_register(bo, user_addr);
+ if (ret) {
+ pr_err("%s: Failed to register MMU notifier: %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ if (criu_resume) {
+ /*
+ * During a CRIU restore operation, the userptr buffer objects
+ * will be validated in the restore_userptr_work worker at a
+ * later stage when it is scheduled by another ioctl called by the
+ * CRIU master process for the target pid being restored.
+ */
+ mutex_lock(&process_info->notifier_lock);
+ mem->invalid++;
+ mutex_unlock(&process_info->notifier_lock);
+ mutex_unlock(&process_info->lock);
+ return 0;
+ }
+
+ ret = amdgpu_ttm_tt_get_user_pages(bo, &range);
+ if (ret) {
+ if (ret == -EAGAIN)
+ pr_debug("Failed to get user pages, try again\n");
+ else
+ pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
+ goto unregister_out;
+ }
+
+ ret = amdgpu_bo_reserve(bo, true);
+ if (ret) {
+ pr_err("%s: Failed to reserve BO\n", __func__);
+ goto release_out;
+ }
+
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
+
+ amdgpu_bo_placement_from_domain(bo, mem->domain);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ pr_err("%s: failed to validate BO\n", __func__);
+ amdgpu_bo_unreserve(bo);
+
+release_out:
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
+unregister_out:
+ if (ret)
+ amdgpu_hmm_unregister(bo);
+out:
+ mutex_unlock(&process_info->lock);
+ return ret;
+}
+
+/* Reserving a BO and its page table BOs must happen atomically to
+ * avoid deadlocks. Some operations update multiple VMs at once. Track
+ * all the reservation info in a context structure. Optionally a sync
+ * object can track VM updates.
+ */
+struct bo_vm_reservation_context {
+ /* DRM execution context for the reservation */
+ struct drm_exec exec;
+ /* Number of VMs reserved */
+ unsigned int n_vms;
+ /* Pointer to sync object */
+ struct amdgpu_sync *sync;
+};
+
+enum bo_vm_match {
+ BO_VM_NOT_MAPPED = 0, /* Match VMs where a BO is not mapped */
+ BO_VM_MAPPED, /* Match VMs where a BO is mapped */
+ BO_VM_ALL, /* Match all VMs a BO was added to */
+};
+
+/**
+ * reserve_bo_and_vm - reserve a BO and a VM unconditionally.
+ * @mem: KFD BO structure.
+ * @vm: the VM to reserve.
+ * @ctx: the struct that will be used in unreserve_bo_and_vms().
+ */
+static int reserve_bo_and_vm(struct kgd_mem *mem,
+ struct amdgpu_vm *vm,
+ struct bo_vm_reservation_context *ctx)
+{
+ struct amdgpu_bo *bo = mem->bo;
+ int ret;
+
+ WARN_ON(!vm);
+
+ ctx->n_vms = 1;
+ ctx->sync = &mem->sync;
+ drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&ctx->exec) {
+ ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
+ drm_exec_retry_on_contention(&ctx->exec);
+ if (unlikely(ret))
+ goto error;
+
+ ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
+ drm_exec_retry_on_contention(&ctx->exec);
+ if (unlikely(ret))
+ goto error;
+ }
+ return 0;
+
+error:
+ pr_err("Failed to reserve buffers in ttm.\n");
+ drm_exec_fini(&ctx->exec);
+ return ret;
+}
+
+/**
+ * reserve_bo_and_cond_vms - reserve a BO and some VMs conditionally
+ * @mem: KFD BO structure.
+ * @vm: the VM to reserve. If NULL, then all VMs associated with the BO
+ * are used. Otherwise, only the given VM associated with the BO is used.
+ * @map_type: the mapping status that will be used to filter the VMs.
+ * @ctx: the struct that will be used in unreserve_bo_and_vms().
+ *
+ * Returns 0 for success, negative for failure.
+ */
+static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
+ struct amdgpu_vm *vm, enum bo_vm_match map_type,
+ struct bo_vm_reservation_context *ctx)
+{
+ struct kfd_mem_attachment *entry;
+ struct amdgpu_bo *bo = mem->bo;
+ int ret;
+
+ ctx->sync = &mem->sync;
+ drm_exec_init(&ctx->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&ctx->exec) {
+ ctx->n_vms = 0;
+ list_for_each_entry(entry, &mem->attachments, list) {
+ if ((vm && vm != entry->bo_va->base.vm) ||
+ (entry->is_mapped != map_type
+ && map_type != BO_VM_ALL))
+ continue;
+
+ ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm,
+ &ctx->exec, 2);
+ drm_exec_retry_on_contention(&ctx->exec);
+ if (unlikely(ret))
+ goto error;
+ ++ctx->n_vms;
+ }
+
+ ret = drm_exec_prepare_obj(&ctx->exec, &bo->tbo.base, 1);
+ drm_exec_retry_on_contention(&ctx->exec);
+ if (unlikely(ret))
+ goto error;
+ }
+ return 0;
+
+error:
+ pr_err("Failed to reserve buffers in ttm.\n");
+ drm_exec_fini(&ctx->exec);
+ return ret;
+}
+
+/**
+ * unreserve_bo_and_vms - Unreserve BO and VMs from a reservation context
+ * @ctx: Reservation context to unreserve
+ * @wait: Optionally wait for a sync object representing pending VM updates
+ * @intr: Whether the wait is interruptible
+ *
+ * Also frees any resources allocated in
+ * reserve_bo_and_(cond_)vm(s). Returns the status from
+ * amdgpu_sync_wait.
+ */
+static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
+ bool wait, bool intr)
+{
+ int ret = 0;
+
+ if (wait)
+ ret = amdgpu_sync_wait(ctx->sync, intr);
+
+ drm_exec_fini(&ctx->exec);
+ ctx->sync = NULL;
+ return ret;
+}
+
+static int unmap_bo_from_gpuvm(struct kgd_mem *mem,
+ struct kfd_mem_attachment *entry,
+ struct amdgpu_sync *sync)
+{
+ struct amdgpu_bo_va *bo_va = entry->bo_va;
+ struct amdgpu_device *adev = entry->adev;
+ struct amdgpu_vm *vm = bo_va->base.vm;
+
+ if (bo_va->queue_refcount) {
+ pr_debug("bo_va->queue_refcount %d\n", bo_va->queue_refcount);
+ return -EBUSY;
+ }
+
+ (void)amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
+
+ (void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
+
+ (void)amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
+
+ return 0;
+}
+
+static int update_gpuvm_pte(struct kgd_mem *mem,
+ struct kfd_mem_attachment *entry,
+ struct amdgpu_sync *sync)
+{
+ struct amdgpu_bo_va *bo_va = entry->bo_va;
+ struct amdgpu_device *adev = entry->adev;
+ int ret;
+
+ ret = kfd_mem_dmamap_attachment(mem, entry);
+ if (ret)
+ return ret;
+
+ /* Update the page tables */
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (ret) {
+ pr_err("amdgpu_vm_bo_update failed\n");
+ return ret;
+ }
+
+ return amdgpu_sync_fence(sync, bo_va->last_pt_update, GFP_KERNEL);
+}
+
+static int map_bo_to_gpuvm(struct kgd_mem *mem,
+ struct kfd_mem_attachment *entry,
+ struct amdgpu_sync *sync,
+ bool no_update_pte)
+{
+ int ret;
+
+ /* Set virtual address for the allocation */
+ ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
+ amdgpu_bo_size(entry->bo_va->base.bo),
+ entry->pte_flags);
+ if (ret) {
+ pr_err("Failed to map VA 0x%llx in vm. ret %d\n",
+ entry->va, ret);
+ return ret;
+ }
+
+ if (no_update_pte)
+ return 0;
+
+ ret = update_gpuvm_pte(mem, entry, sync);
+ if (ret) {
+ pr_err("update_gpuvm_pte() failed\n");
+ goto update_gpuvm_pte_failed;
+ }
+
+ return 0;
+
+update_gpuvm_pte_failed:
+ unmap_bo_from_gpuvm(mem, entry, sync);
+ kfd_mem_dmaunmap_attachment(mem, entry);
+ return ret;
+}
+
+static int process_validate_vms(struct amdkfd_process_info *process_info,
+ struct ww_acquire_ctx *ticket)
+{
+ struct amdgpu_vm *peer_vm;
+ int ret;
+
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ ret = vm_validate_pt_pd_bos(peer_vm, ticket);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
+ struct amdgpu_sync *sync)
+{
+ struct amdgpu_vm *peer_vm;
+ int ret;
+
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ struct amdgpu_bo *pd = peer_vm->root.bo;
+
+ ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
+ AMDGPU_SYNC_NE_OWNER,
+ AMDGPU_FENCE_OWNER_KFD);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int process_update_pds(struct amdkfd_process_info *process_info,
+ struct amdgpu_sync *sync)
+{
+ struct amdgpu_vm *peer_vm;
+ int ret;
+
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ ret = vm_update_pds(peer_vm, sync);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
+ struct dma_fence **ef)
+{
+ struct amdkfd_process_info *info = NULL;
+ int ret;
+
+ if (!*process_info) {
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ mutex_init(&info->lock);
+ mutex_init(&info->notifier_lock);
+ INIT_LIST_HEAD(&info->vm_list_head);
+ INIT_LIST_HEAD(&info->kfd_bo_list);
+ INIT_LIST_HEAD(&info->userptr_valid_list);
+ INIT_LIST_HEAD(&info->userptr_inval_list);
+
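+ /* Create the per-process eviction fence shared by all KFD BOs of this process */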
+ info->eviction_fence =
+ amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
+ current->mm,
+ NULL);
+ if (!info->eviction_fence) {
+ pr_err("Failed to create eviction fence\n");
+ ret = -ENOMEM;
+ goto create_evict_fence_fail;
+ }
+
+ info->pid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ INIT_DELAYED_WORK(&info->restore_userptr_work,
+ amdgpu_amdkfd_restore_userptr_worker);
+
+ *process_info = info;
+ }
+
+ vm->process_info = *process_info;
+
+ /* Validate page directory and attach eviction fence */
+ ret = amdgpu_bo_reserve(vm->root.bo, true);
+ if (ret)
+ goto reserve_pd_fail;
+ ret = vm_validate_pt_pd_bos(vm, NULL);
+ if (ret) {
+ pr_err("validate_pt_pd_bos() failed\n");
+ goto validate_pd_fail;
+ }
+ ret = amdgpu_bo_sync_wait(vm->root.bo,
+ AMDGPU_FENCE_OWNER_KFD, false);
+ if (ret)
+ goto wait_pd_fail;
+ ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
+ if (ret)
+ goto reserve_shared_fail;
+ dma_resv_add_fence(vm->root.bo->tbo.base.resv,
+ &vm->process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
+ amdgpu_bo_unreserve(vm->root.bo);
+
+ /* Update process info */
+ mutex_lock(&vm->process_info->lock);
+ list_add_tail(&vm->vm_list_node,
+ &(vm->process_info->vm_list_head));
+ vm->process_info->n_vms++;
+ if (ef)
+ *ef = dma_fence_get(&vm->process_info->eviction_fence->base);
+ mutex_unlock(&vm->process_info->lock);
+
+ return 0;
+
+reserve_shared_fail:
+wait_pd_fail:
+validate_pd_fail:
+ amdgpu_bo_unreserve(vm->root.bo);
+reserve_pd_fail:
+ vm->process_info = NULL;
+ if (info) {
+ dma_fence_put(&info->eviction_fence->base);
+ *process_info = NULL;
+ put_pid(info->pid);
+create_evict_fence_fail:
+ mutex_destroy(&info->lock);
+ mutex_destroy(&info->notifier_lock);
+ kfree(info);
+ }
+ return ret;
+}
+
+/**
+ * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria
+ * @bo: Handle of buffer object being pinned
+ * @domain: Domain into which BO should be pinned
+ *
+ * - USERPTR BOs are UNPINNABLE and will return an error
+ * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
+ * PIN count incremented. It is valid to PIN a BO multiple times
+ *
+ * Return: ZERO if successful in pinning, Non-Zero in case of error.
+ */
+static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain)
+{
+ int ret = 0;
+
+ ret = amdgpu_bo_reserve(bo, false);
+ if (unlikely(ret))
+ return ret;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) {
+ /*
+ * If bo is not contiguous on VRAM, move to system memory first to ensure
+ * we can get contiguous VRAM space after evicting other BOs.
+ */
+ if (!(bo->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {
+ struct ttm_operation_ctx ctx = { true, false };
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (unlikely(ret)) {
+ pr_debug("validate bo 0x%p to GTT failed %d\n", &bo->tbo, ret);
+ goto out;
+ }
+ }
+ }
+
+ ret = amdgpu_bo_pin(bo, domain);
+ if (ret)
+ pr_err("Error in Pinning BO to domain: %d\n", domain);
+
+ amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
+out:
+ amdgpu_bo_unreserve(bo);
+ return ret;
+}
+
+/**
+ * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria
+ * @bo: Handle of buffer object being unpinned
+ *
+ * - Is an illegal request for USERPTR BOs and is ignored
+ * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their
+ * PIN count decremented. Calls to UNPIN must balance calls to PIN
+ */
+static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
+{
+ int ret = 0;
+
+ ret = amdgpu_bo_reserve(bo, false);
+ if (unlikely(ret))
+ return;
+
+ amdgpu_bo_unpin(bo);
+ amdgpu_bo_unreserve(bo);
+}
+
+int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
+ struct amdgpu_vm *avm,
+ void **process_info,
+ struct dma_fence **ef)
+{
+ int ret;
+
+ /* Already a compute VM? */
+ if (avm->process_info)
+ return -EINVAL;
+
+ /* Convert VM into a compute VM */
+ ret = amdgpu_vm_make_compute(adev, avm);
+ if (ret)
+ return ret;
+
+ /* Initialize KFD part of the VM and process info */
+ ret = init_kfd_vm(avm, process_info, ef);
+ if (ret)
+ return ret;
+
+ amdgpu_vm_set_task_info(avm);
+
+ return 0;
+}
+
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+ struct amdkfd_process_info *process_info = vm->process_info;
+
+ if (!process_info)
+ return;
+
+ /* Update process info */
+ mutex_lock(&process_info->lock);
+ process_info->n_vms--;
+ list_del(&vm->vm_list_node);
+ mutex_unlock(&process_info->lock);
+
+ vm->process_info = NULL;
+
+ /* Release per-process resources when last compute VM is destroyed */
+ if (!process_info->n_vms) {
+ WARN_ON(!list_empty(&process_info->kfd_bo_list));
+ WARN_ON(!list_empty(&process_info->userptr_valid_list));
+ WARN_ON(!list_empty(&process_info->userptr_inval_list));
+
+ dma_fence_put(&process_info->eviction_fence->base);
+ cancel_delayed_work_sync(&process_info->restore_userptr_work);
+ put_pid(process_info->pid);
+ mutex_destroy(&process_info->lock);
+ mutex_destroy(&process_info->notifier_lock);
+ kfree(process_info);
+ }
+}
+
+uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv)
+{
+ struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
+ struct amdgpu_bo *pd = avm->root.bo;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
+
+ if (adev->asic_type < CHIP_VEGA10)
+ return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
+ return avm->pd_phys_addr;
+}
+
+void amdgpu_amdkfd_block_mmu_notifications(void *p)
+{
+ struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
+
+ mutex_lock(&pinfo->lock);
+ WRITE_ONCE(pinfo->block_mmu_notifications, true);
+ mutex_unlock(&pinfo->lock);
+}
+
+int amdgpu_amdkfd_criu_resume(void *p)
+{
+ int ret = 0;
+ struct amdkfd_process_info *pinfo = (struct amdkfd_process_info *)p;
+
+ mutex_lock(&pinfo->lock);
+ pr_debug("scheduling work\n");
+ mutex_lock(&pinfo->notifier_lock);
+ pinfo->evicted_bos++;
+ mutex_unlock(&pinfo->notifier_lock);
+ if (!READ_ONCE(pinfo->block_mmu_notifications)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ WRITE_ONCE(pinfo->block_mmu_notifications, false);
+ queue_delayed_work(system_freezable_wq,
+ &pinfo->restore_userptr_work, 0);
+
+out_unlock:
+ mutex_unlock(&pinfo->lock);
+ return ret;
+}
+
+size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
+ uint8_t xcp_id)
+{
+ uint64_t reserved_for_pt =
+ ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ uint64_t reserved_for_ras = (con ? con->reserved_pages_in_bytes : 0);
+ ssize_t available;
+ uint64_t vram_available, system_mem_available, ttm_mem_available;
+
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+ if (adev->apu_prefer_gtt && !adev->gmc.is_app_apu)
+ vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
+ - adev->kfd.vram_used_aligned[xcp_id];
+ else
+ vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id)
+ - adev->kfd.vram_used_aligned[xcp_id]
+ - atomic64_read(&adev->vram_pin_size)
+ - reserved_for_pt
+ - reserved_for_ras;
+
+ if (adev->apu_prefer_gtt) {
+ system_mem_available = no_system_mem_limit ?
+ kfd_mem_limit.max_system_mem_limit :
+ kfd_mem_limit.max_system_mem_limit -
+ kfd_mem_limit.system_mem_used;
+
+ ttm_mem_available = kfd_mem_limit.max_ttm_mem_limit -
+ kfd_mem_limit.ttm_mem_used;
+
+ available = min3(system_mem_available, ttm_mem_available,
+ vram_available);
+ available = ALIGN_DOWN(available, PAGE_SIZE);
+ } else {
+ available = ALIGN_DOWN(vram_available, VRAM_AVAILABLITY_ALIGN);
+ }
+
+ spin_unlock(&kfd_mem_limit.mem_limit_lock);
+
+ if (available < 0)
+ available = 0;
+
+ return available;
+}
+
+int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ struct amdgpu_device *adev, uint64_t va, uint64_t size,
+ void *drm_priv, struct kgd_mem **mem,
+ uint64_t *offset, uint32_t flags, bool criu_resume)
+{
+ struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
+ struct amdgpu_fpriv *fpriv = container_of(avm, struct amdgpu_fpriv, vm);
+ enum ttm_bo_type bo_type = ttm_bo_type_device;
+ struct sg_table *sg = NULL;
+ uint64_t user_addr = 0;
+ struct amdgpu_bo *bo;
+ struct drm_gem_object *gobj = NULL;
+ u32 domain, alloc_domain;
+ uint64_t aligned_size;
+ int8_t xcp_id = -1;
+ u64 alloc_flags;
+ int ret;
+
+ /*
+ * Check on which domain to allocate BO
+ */
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+ domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+ if (adev->apu_prefer_gtt) {
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
+ alloc_flags = 0;
+ } else {
+ alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
+ alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
+
+ /* For contiguous VRAM allocation */
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_CONTIGUOUS)
+ alloc_flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ }
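+ /* Fall back to memory partition 0 if the file priv has no XCP partition assigned */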
+ xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
+ 0 : fpriv->xcp_id;
+ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
+ domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
+ alloc_flags = 0;
+ } else {
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
+ alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
+
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
+ if (!offset || !*offset)
+ return -EINVAL;
+ user_addr = untagged_addr(*offset);
+ } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
+ bo_type = ttm_bo_type_sg;
+ if (size > UINT_MAX)
+ return -EINVAL;
+ sg = create_sg_table(*offset, size);
+ if (!sg)
+ return -ENOMEM;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT)
+ alloc_flags |= AMDGPU_GEM_CREATE_COHERENT;
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_EXT_COHERENT)
+ alloc_flags |= AMDGPU_GEM_CREATE_EXT_COHERENT;
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED)
+ alloc_flags |= AMDGPU_GEM_CREATE_UNCACHED;
+
+ *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if (!*mem) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ INIT_LIST_HEAD(&(*mem)->attachments);
+ mutex_init(&(*mem)->lock);
+ (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
+
+ /* Workaround for AQL queue wraparound bug. Map the same
+ * memory twice. That means we only actually allocate half
+ * the memory.
+ */
+ if ((*mem)->aql_queue)
+ size >>= 1;
+ aligned_size = PAGE_ALIGN(size);
+
+ (*mem)->alloc_flags = flags;
+
+ amdgpu_sync_create(&(*mem)->sync);
+
+ ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags,
+ xcp_id);
+ if (ret) {
+ pr_debug("Insufficient memory\n");
+ goto err_reserve_limit;
+ }
+
+ pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s xcp_id %d\n",
+ va, (*mem)->aql_queue ? size << 1 : size,
+ domain_string(alloc_domain), xcp_id);
+
+ ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
+ bo_type, NULL, &gobj, xcp_id + 1);
+ if (ret) {
+ pr_debug("Failed to create BO on domain %s. ret %d\n",
+ domain_string(alloc_domain), ret);
+ goto err_bo_create;
+ }
+ ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
+ if (ret) {
+ pr_debug("Failed to allow vma node access. ret %d\n", ret);
+ goto err_node_allow;
+ }
+ ret = drm_gem_handle_create(adev->kfd.client.file, gobj, &(*mem)->gem_handle);
+ if (ret)
+ goto err_gem_handle_create;
+ bo = gem_to_amdgpu_bo(gobj);
+ if (bo_type == ttm_bo_type_sg) {
+ bo->tbo.sg = sg;
+ bo->tbo.ttm->sg = sg;
+ }
+ bo->kfd_bo = *mem;
+ (*mem)->bo = bo;
+ if (user_addr)
+ bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO;
+
+ (*mem)->va = va;
+ (*mem)->domain = domain;
+ (*mem)->mapped_to_gpu_memory = 0;
+ (*mem)->process_info = avm->process_info;
+
+ add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
+
+ if (user_addr) {
+ pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
+ ret = init_user_pages(*mem, user_addr, criu_resume);
+ if (ret)
+ goto allocate_init_user_pages_failed;
+ } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
+ ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (ret) {
+ pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n");
+ goto err_pin_bo;
+ }
+ bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+ bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
+ } else {
+ mutex_lock(&avm->process_info->lock);
+ if (avm->process_info->eviction_fence &&
+ !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
+ ret = amdgpu_amdkfd_bo_validate_and_fence(bo, domain,
+ &avm->process_info->eviction_fence->base);
+ mutex_unlock(&avm->process_info->lock);
+ if (ret)
+ goto err_validate_bo;
+ }
+
+ if (offset)
+ *offset = amdgpu_bo_mmap_offset(bo);
+
+ return 0;
+
+allocate_init_user_pages_failed:
+err_pin_bo:
+err_validate_bo:
+ remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
+ drm_gem_handle_delete(adev->kfd.client.file, (*mem)->gem_handle);
+err_gem_handle_create:
+ drm_vma_node_revoke(&gobj->vma_node, drm_priv);
+err_node_allow:
+ /* Don't unreserve system mem limit twice */
+ goto err_reserve_limit;
+err_bo_create:
+ amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
+err_reserve_limit:
+ amdgpu_sync_free(&(*mem)->sync);
+ mutex_destroy(&(*mem)->lock);
+ if (gobj)
+ drm_gem_object_put(gobj);
+ else
+ kfree(*mem);
+err:
+ if (sg) {
+ sg_free_table(sg);
+ kfree(sg);
+ }
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
+ uint64_t *size)
+{
+ struct amdkfd_process_info *process_info = mem->process_info;
+ unsigned long bo_size = mem->bo->tbo.base.size;
+ bool use_release_notifier = (mem->bo->kfd_bo == mem);
+ struct kfd_mem_attachment *entry, *tmp;
+ struct bo_vm_reservation_context ctx;
+ unsigned int mapped_to_gpu_memory;
+ int ret;
+ bool is_imported = false;
+
+ mutex_lock(&mem->lock);
+
+ /* Unpin MMIO/DOORBELL BOs that were pinned during allocation */
+ if (mem->alloc_flags &
+ (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
+ amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo);
+ }
+
+ mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
+ is_imported = mem->is_imported;
+ mutex_unlock(&mem->lock);
+ /* lock is not needed after this, since mem is unused and will
+ * be freed anyway
+ */
+
+ if (mapped_to_gpu_memory > 0) {
+ pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
+ mem->va, bo_size);
+ return -EBUSY;
+ }
+
+ /* Make sure restore workers don't access the BO any more */
+ mutex_lock(&process_info->lock);
+ list_del(&mem->validate_list);
+ mutex_unlock(&process_info->lock);
+
+ /* Cleanup user pages and MMU notifiers */
+ if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) {
+ amdgpu_hmm_unregister(mem->bo);
+ mutex_lock(&process_info->notifier_lock);
+ amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range);
+ mutex_unlock(&process_info->notifier_lock);
+ }
+
+ ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
+ if (unlikely(ret))
+ return ret;
+
+ amdgpu_amdkfd_remove_eviction_fence(mem->bo,
+ process_info->eviction_fence);
+ pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
+ mem->va + bo_size * (1 + mem->aql_queue));
+
+ /* Remove from VM internal data structures */
+ list_for_each_entry_safe(entry, tmp, &mem->attachments, list) {
+ kfd_mem_dmaunmap_attachment(mem, entry);
+ kfd_mem_detach(entry);
+ }
+
+ ret = unreserve_bo_and_vms(&ctx, false, false);
+
+ /* Free the sync object */
+ amdgpu_sync_free(&mem->sync);
+
+ /* If the SG is not NULL, it's one we created for a doorbell or mmio
+ * remap BO. We need to free it.
+ */
+ if (mem->bo->tbo.sg) {
+ sg_free_table(mem->bo->tbo.sg);
+ kfree(mem->bo->tbo.sg);
+ }
+
+ /* Update the size of the BO being freed if it was allocated from
+ * VRAM and is not imported. For APP APUs, VRAM allocations are done
+ * in the GTT domain.
+ */
+ if (size) {
+ if (!is_imported &&
+ (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
+ (adev->apu_prefer_gtt &&
+ mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
+ *size = bo_size;
+ else
+ *size = 0;
+ }
+
+ /* Free the BO */
+ drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
+ drm_gem_handle_delete(adev->kfd.client.file, mem->gem_handle);
+ if (mem->dmabuf) {
+ dma_buf_put(mem->dmabuf);
+ mem->dmabuf = NULL;
+ }
+ mutex_destroy(&mem->lock);
+
+ /* If this releases the last reference, it will end up calling
+ * amdgpu_amdkfd_release_notify and kfree the mem struct. That's why
+ * this needs to be the last call here.
+ */
+ drm_gem_object_put(&mem->bo->tbo.base);
+
+ /*
+ * For kgd_mem allocated in amdgpu_amdkfd_gpuvm_import_dmabuf(),
+ * explicitly free it here.
+ */
+ if (!use_release_notifier)
+ kfree(mem);
+
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
+ struct amdgpu_device *adev, struct kgd_mem *mem,
+ void *drm_priv)
+{
+ struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
+ int ret;
+ struct amdgpu_bo *bo;
+ uint32_t domain;
+ struct kfd_mem_attachment *entry;
+ struct bo_vm_reservation_context ctx;
+ unsigned long bo_size;
+ bool is_invalid_userptr = false;
+
+ bo = mem->bo;
+ if (!bo) {
+ pr_err("Invalid BO when mapping memory to GPU\n");
+ return -EINVAL;
+ }
+
+ /* Make sure restore is not running concurrently. Since we
+ * don't map invalid userptr BOs, we rely on the next restore
+ * worker to do the mapping
+ */
+ mutex_lock(&mem->process_info->lock);
+
+ /* Lock notifier lock. If we find an invalid userptr BO, we can be
+ * sure that the MMU notifier is no longer running
+ * concurrently and the queues are actually stopped
+ */
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+ mutex_lock(&mem->process_info->notifier_lock);
+ is_invalid_userptr = !!mem->invalid;
+ mutex_unlock(&mem->process_info->notifier_lock);
+ }
+
+ mutex_lock(&mem->lock);
+
+ domain = mem->domain;
+ bo_size = bo->tbo.base.size;
+
+ pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n",
+ mem->va,
+ mem->va + bo_size * (1 + mem->aql_queue),
+ avm, domain_string(domain));
+
+ if (!kfd_mem_is_attached(avm, mem)) {
+ ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
+ if (ret)
+ goto out;
+ }
+
+ ret = reserve_bo_and_vm(mem, avm, &ctx);
+ if (unlikely(ret))
+ goto out;
+
+ /* Userptr can be marked as "not invalid", but not actually be
+ * validated yet (still in the system domain). In that case
+ * the queues are still stopped and we can leave mapping for
+ * the next restore worker
+ */
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
+ bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
+ is_invalid_userptr = true;
+
+ ret = vm_validate_pt_pd_bos(avm, NULL);
+ if (unlikely(ret))
+ goto out_unreserve;
+
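+ /* Map every attachment of this VM that is not mapped yet */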
+ list_for_each_entry(entry, &mem->attachments, list) {
+ if (entry->bo_va->base.vm != avm || entry->is_mapped)
+ continue;
+
+ pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
+ entry->va, entry->va + bo_size, entry);
+
+ ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
+ is_invalid_userptr);
+ if (ret) {
+ pr_err("Failed to map bo to gpuvm\n");
+ goto out_unreserve;
+ }
+
+ ret = vm_update_pds(avm, ctx.sync);
+ if (ret) {
+ pr_err("Failed to update page directories\n");
+ goto out_unreserve;
+ }
+
+ entry->is_mapped = true;
+ mem->mapped_to_gpu_memory++;
+ pr_debug("\t INC mapping count %d\n",
+ mem->mapped_to_gpu_memory);
+ }
+
+ ret = unreserve_bo_and_vms(&ctx, false, false);
+
+ goto out;
+
+out_unreserve:
+ unreserve_bo_and_vms(&ctx, false, false);
+out:
+ mutex_unlock(&mem->process_info->lock);
+ mutex_unlock(&mem->lock);
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv)
+{
+ struct kfd_mem_attachment *entry;
+ struct amdgpu_vm *vm;
+ int ret;
+
+ vm = drm_priv_to_vm(drm_priv);
+
+ mutex_lock(&mem->lock);
+
+ ret = amdgpu_bo_reserve(mem->bo, true);
+ if (ret)
+ goto out;
+
+ list_for_each_entry(entry, &mem->attachments, list) {
+ if (entry->bo_va->base.vm != vm)
+ continue;
+ if (entry->bo_va->base.bo->tbo.ttm &&
+ !entry->bo_va->base.bo->tbo.ttm->sg)
+ continue;
+
+ kfd_mem_dmaunmap_attachment(mem, entry);
+ }
+
+ amdgpu_bo_unreserve(mem->bo);
+out:
+ mutex_unlock(&mem->lock);
+
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
+ struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
+{
+ struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
+ unsigned long bo_size = mem->bo->tbo.base.size;
+ struct kfd_mem_attachment *entry;
+ struct bo_vm_reservation_context ctx;
+ int ret;
+
+ mutex_lock(&mem->lock);
+
+ ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx);
+ if (unlikely(ret))
+ goto out;
+ /* If no VMs were reserved, it means the BO wasn't actually mapped */
+ if (ctx.n_vms == 0) {
+ ret = -EINVAL;
+ goto unreserve_out;
+ }
+
+ ret = vm_validate_pt_pd_bos(avm, NULL);
+ if (unlikely(ret))
+ goto unreserve_out;
+
+ pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n",
+ mem->va,
+ mem->va + bo_size * (1 + mem->aql_queue),
+ avm);
+
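+ /* Unmap every attachment of this VM that is currently mapped */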
+ list_for_each_entry(entry, &mem->attachments, list) {
+ if (entry->bo_va->base.vm != avm || !entry->is_mapped)
+ continue;
+
+ pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
+ entry->va, entry->va + bo_size, entry);
+
+ ret = unmap_bo_from_gpuvm(mem, entry, ctx.sync);
+ if (ret)
+ goto unreserve_out;
+
+ entry->is_mapped = false;
+
+ mem->mapped_to_gpu_memory--;
+ pr_debug("\t DEC mapping count %d\n",
+ mem->mapped_to_gpu_memory);
+ }
+
+unreserve_out:
+ unreserve_bo_and_vms(&ctx, false, false);
+out:
+ mutex_unlock(&mem->lock);
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_sync_memory(
+ struct amdgpu_device *adev, struct kgd_mem *mem, bool intr)
+{
+ struct amdgpu_sync sync;
+ int ret;
+
+ amdgpu_sync_create(&sync);
+
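+ /* Clone the BO's sync object under the lock, then wait on the copy */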
+ mutex_lock(&mem->lock);
+ amdgpu_sync_clone(&mem->sync, &sync);
+ mutex_unlock(&mem->lock);
+
+ ret = amdgpu_sync_wait(&sync, intr);
+ amdgpu_sync_free(&sync);
+ return ret;
+}
+
+/**
+ * amdgpu_amdkfd_map_gtt_bo_to_gart - Map BO to GART and increment reference count
+ * @bo: Buffer object to be mapped
+ * @bo_gart: Return bo reference
+ *
+ * Before returning, the BO reference count is incremented. To release the
+ * reference and unpin/unmap the BO, call amdgpu_amdkfd_free_gtt_mem.
+ */
+int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo, struct amdgpu_bo **bo_gart)
+{
+ int ret;
+
+ ret = amdgpu_bo_reserve(bo, true);
+ if (ret) {
+ pr_err("Failed to reserve bo. ret %d\n", ret);
+ goto err_reserve_bo_failed;
+ }
+
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (ret) {
+ pr_err("Failed to pin bo. ret %d\n", ret);
+ goto err_pin_bo_failed;
+ }
+
+ ret = amdgpu_ttm_alloc_gart(&bo->tbo);
+ if (ret) {
+ pr_err("Failed to bind bo to GART. ret %d\n", ret);
+ goto err_map_bo_gart_failed;
+ }
+
+ amdgpu_amdkfd_remove_eviction_fence(
+ bo, bo->vm_bo->vm->process_info->eviction_fence);
+
+ amdgpu_bo_unreserve(bo);
+
+ *bo_gart = amdgpu_bo_ref(bo);
+
+ return 0;
+
+err_map_bo_gart_failed:
+ amdgpu_bo_unpin(bo);
+err_pin_bo_failed:
+ amdgpu_bo_unreserve(bo);
+err_reserve_bo_failed:
+
+ return ret;
+}
+
+/** amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel() - Map a GTT BO for kernel CPU access
+ *
+ * @mem: Buffer object to be mapped for CPU access
+ * @kptr[out]: pointer in kernel CPU address space
+ * @size[out]: size of the buffer
+ *
+ * Pins the BO and maps it for kernel CPU access. The eviction fence is removed
+ * from the BO, since pinned BOs cannot be evicted. The BO must remain on the
+ * validate_list, so the GPU mapping can be restored after a page table was
+ * evicted.
+ *
+ * Return: 0 on success, error code on failure
+ */
+int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
+ void **kptr, uint64_t *size)
+{
+ int ret;
+ struct amdgpu_bo *bo = mem->bo;
+
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
+ pr_err("userptr can't be mapped to kernel\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&mem->process_info->lock);
+
+ ret = amdgpu_bo_reserve(bo, true);
+ if (ret) {
+ pr_err("Failed to reserve bo. ret %d\n", ret);
+ goto bo_reserve_failed;
+ }
+
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (ret) {
+ pr_err("Failed to pin bo. ret %d\n", ret);
+ goto pin_failed;
+ }
+
+ ret = amdgpu_bo_kmap(bo, kptr);
+ if (ret) {
+ pr_err("Failed to map bo to kernel. ret %d\n", ret);
+ goto kmap_failed;
+ }
+
+ amdgpu_amdkfd_remove_eviction_fence(
+ bo, mem->process_info->eviction_fence);
+
+ if (size)
+ *size = amdgpu_bo_size(bo);
+
+ amdgpu_bo_unreserve(bo);
+
+ mutex_unlock(&mem->process_info->lock);
+ return 0;
+
+kmap_failed:
+ amdgpu_bo_unpin(bo);
+pin_failed:
+ amdgpu_bo_unreserve(bo);
+bo_reserve_failed:
+ mutex_unlock(&mem->process_info->lock);
+
+ return ret;
+}
+
+/** amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel() - Unmap a GTT BO for kernel CPU access
+ *
+ * @mem: Buffer object to be unmapped for CPU access
+ *
+ * Removes the kernel CPU mapping and unpins the BO. It does not restore the
+ * eviction fence, so this function should only be used for cleanup before the
+ * BO is destroyed.
+ */
+void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem)
+{
+ struct amdgpu_bo *bo = mem->bo;
+
+ (void)amdgpu_bo_reserve(bo, true);
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unpin(bo);
+ amdgpu_bo_unreserve(bo);
+}
+
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
+ struct kfd_vm_fault_info *mem)
+{
+ if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
+ *mem = *adev->gmc.vm_fault_info;
+ mb(); /* make sure read happened */
+ atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ }
+ return 0;
+}
+
+static int import_obj_create(struct amdgpu_device *adev,
+ struct dma_buf *dma_buf,
+ struct drm_gem_object *obj,
+ uint64_t va, void *drm_priv,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset)
+{
+ struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
+ struct amdgpu_bo *bo;
+ int ret;
+
+ bo = gem_to_amdgpu_bo(obj);
+ if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT)))
+ /* Only VRAM and GTT BOs are supported */
+ return -EINVAL;
+
+ *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if (!*mem)
+ return -ENOMEM;
+
+ ret = drm_vma_node_allow(&obj->vma_node, drm_priv);
+ if (ret)
+ goto err_free_mem;
+
+ if (size)
+ *size = amdgpu_bo_size(bo);
+
+ if (mmap_offset)
+ *mmap_offset = amdgpu_bo_mmap_offset(bo);
+
+ INIT_LIST_HEAD(&(*mem)->attachments);
+ mutex_init(&(*mem)->lock);
+
+ (*mem)->alloc_flags =
+ ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
+ | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
+ | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
+
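+ /* Take a dma-buf reference that is held for the lifetime of the kgd_mem */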
+ get_dma_buf(dma_buf);
+ (*mem)->dmabuf = dma_buf;
+ (*mem)->bo = bo;
+ (*mem)->va = va;
+ (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
+ !adev->apu_prefer_gtt ?
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
+
+ (*mem)->mapped_to_gpu_memory = 0;
+ (*mem)->process_info = avm->process_info;
+ add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
+ amdgpu_sync_create(&(*mem)->sync);
+ (*mem)->is_imported = true;
+
+ mutex_lock(&avm->process_info->lock);
+ if (avm->process_info->eviction_fence &&
+ !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
+ ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain,
+ &avm->process_info->eviction_fence->base);
+ mutex_unlock(&avm->process_info->lock);
+ if (ret)
+ goto err_remove_mem;
+
+ return 0;
+
+err_remove_mem:
+ remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
+ drm_vma_node_revoke(&obj->vma_node, drm_priv);
+err_free_mem:
+ kfree(*mem);
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
+ uint64_t va, void *drm_priv,
+ struct kgd_mem **mem, uint64_t *size,
+ uint64_t *mmap_offset)
+{
+ struct drm_gem_object *obj;
+ uint32_t handle;
+ int ret;
+
+ ret = drm_gem_prime_fd_to_handle(&adev->ddev, adev->kfd.client.file, fd,
+ &handle);
+ if (ret)
+ return ret;
+ obj = drm_gem_object_lookup(adev->kfd.client.file, handle);
+ if (!obj) {
+ ret = -EINVAL;
+ goto err_release_handle;
+ }
+
+ ret = import_obj_create(adev, obj->dma_buf, obj, va, drm_priv, mem, size,
+ mmap_offset);
+ if (ret)
+ goto err_put_obj;
+
+ (*mem)->gem_handle = handle;
+
+ return 0;
+
+err_put_obj:
+ drm_gem_object_put(obj);
+err_release_handle:
+ drm_gem_handle_delete(adev->kfd.client.file, handle);
+ return ret;
+}
+
+int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
+ struct dma_buf **dma_buf)
+{
+ int ret;
+
+ mutex_lock(&mem->lock);
+ ret = kfd_mem_export_dmabuf(mem);
+ if (ret)
+ goto out;
+
+ get_dma_buf(mem->dmabuf);
+ *dma_buf = mem->dmabuf;
+out:
+ mutex_unlock(&mem->lock);
+ return ret;
+}
+
+/* Evict a userptr BO by stopping the queues if necessary
+ *
+ * Runs in MMU notifier, may be in RECLAIM_FS context. This means it
+ * cannot do any memory allocations, and cannot take any locks that
+ * are held elsewhere while allocating memory.
+ *
+ * It doesn't do anything to the BO itself. The real work happens in
+ * restore, where we get updated page addresses. This function only
+ * ensures that GPU access to the BO is stopped.
+ */
+int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
+ unsigned long cur_seq, struct kgd_mem *mem)
+{
+ struct amdkfd_process_info *process_info = mem->process_info;
+ int r = 0;
+
+ /* Do not process MMU notifications during CRIU restore until
+ * KFD_CRIU_OP_RESUME IOCTL is received
+ */
+ if (READ_ONCE(process_info->block_mmu_notifications))
+ return 0;
+
+ mutex_lock(&process_info->notifier_lock);
+ mmu_interval_set_seq(mni, cur_seq);
+
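+ /* Mark this BO invalid; the restore worker revalidates it later */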
+ mem->invalid++;
+ if (++process_info->evicted_bos == 1) {
+ /* First eviction, stop the queues */
+ r = kgd2kfd_quiesce_mm(mni->mm,
+ KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
+
+ if (r && r != -ESRCH)
+ pr_err("Failed to quiesce KFD\n");
+
+ if (r != -ESRCH)
+ queue_delayed_work(system_freezable_wq,
+ &process_info->restore_userptr_work,
+ msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
+ }
+ mutex_unlock(&process_info->notifier_lock);
+
+ return r;
+}
+
+/* Update invalid userptr BOs
+ *
+ * Moves invalidated (evicted) userptr BOs from userptr_valid_list to
+ * userptr_inval_list and updates user pages for all BOs that have
+ * been invalidated since their last update.
+ */
+static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
+ struct mm_struct *mm)
+{
+ struct kgd_mem *mem, *tmp_mem;
+ struct amdgpu_bo *bo;
+ struct ttm_operation_ctx ctx = { false, false };
+ uint32_t invalid;
+ int ret = 0;
+
+ mutex_lock(&process_info->notifier_lock);
+
+ /* Move all invalidated BOs to the userptr_inval_list */
+ list_for_each_entry_safe(mem, tmp_mem,
+ &process_info->userptr_valid_list,
+ validate_list)
+ if (mem->invalid)
+ list_move_tail(&mem->validate_list,
+ &process_info->userptr_inval_list);
+
+ /* Go through userptr_inval_list and update any invalid user_pages */
+ list_for_each_entry(mem, &process_info->userptr_inval_list,
+ validate_list) {
+ invalid = mem->invalid;
+ if (!invalid)
+ /* BO hasn't been invalidated since the last
+ * revalidation attempt. Keep its page list.
+ */
+ continue;
+
+ bo = mem->bo;
+
+ amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range);
+ mem->range = NULL;
+
+ /* BO reservations and getting user pages (hmm_range_fault)
+ * must happen outside the notifier lock
+ */
+ mutex_unlock(&process_info->notifier_lock);
+
+ /* Move the BO to system (CPU) domain if necessary to unmap
+ * and free the SG table
+ */
+ if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) {
+ if (amdgpu_bo_reserve(bo, true))
+ return -EAGAIN;
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ amdgpu_bo_unreserve(bo);
+ if (ret) {
+ pr_err("%s: Failed to invalidate userptr BO\n",
+ __func__);
+ return -EAGAIN;
+ }
+ }
+
+ /* Get updated user pages */
+ ret = amdgpu_ttm_tt_get_user_pages(bo, &mem->range);
+ if (ret) {
+ pr_debug("Failed %d to get user pages\n", ret);
+
+ /* Treat the -EFAULT bad address error as success. It will
+ * fail later with a VM fault if the GPU tries to access
+ * it. Better than hanging indefinitely with stalled
+ * user mode queues.
+ *
+ * Return other errors (-EBUSY or -ENOMEM) to retry the restore
+ */
+ if (ret != -EFAULT)
+ return ret;
+
+ /* If applications unmap memory before destroying the userptr
+ * from the KFD, trigger a segmentation fault in VM debug mode.
+ */
+ if (amdgpu_ttm_adev(bo->tbo.bdev)->debug_vm_userptr) {
+ struct kfd_process *p;
+
+ pr_err("Pid %d unmapped memory before destroying userptr at GPU addr 0x%llx\n",
+ pid_nr(process_info->pid), mem->va);
+
+ // Send GPU VM fault to user space
+ p = kfd_lookup_process_by_pid(process_info->pid);
+ if (p) {
+ kfd_signal_vm_fault_event_with_userptr(p, mem->va);
+ kfd_unref_process(p);
+ }
+ }
+
+ ret = 0;
+ }
+
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->range);
+
+ mutex_lock(&process_info->notifier_lock);
+
+ /* Mark the BO as valid unless it was invalidated
+ * again concurrently.
+ */
+ if (mem->invalid != invalid) {
+ ret = -EAGAIN;
+ goto unlock_out;
+ }
+ /* set mem valid if mem has hmm range associated */
+ if (mem->range)
+ mem->invalid = 0;
+ }
+
+unlock_out:
+ mutex_unlock(&process_info->notifier_lock);
+
+ return ret;
+}
+
+/* Validate invalid userptr BOs
+ *
+ * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables
+ * with new page addresses and waits for the page table updates to complete.
+ */
+static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_sync sync;
+ struct drm_exec exec;
+
+ struct amdgpu_vm *peer_vm;
+ struct kgd_mem *mem, *tmp_mem;
+ struct amdgpu_bo *bo;
+ int ret;
+
+ amdgpu_sync_create(&sync);
+
+ drm_exec_init(&exec, 0, 0);
+ /* Reserve all BOs and page tables for validation */
+ drm_exec_until_all_locked(&exec) {
+ /* Reserve all the page directories */
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unreserve_out;
+ }
+
+ /* Reserve the userptr_inval_list entries to resv_list */
+ list_for_each_entry(mem, &process_info->userptr_inval_list,
+ validate_list) {
+ struct drm_gem_object *gobj;
+
+ gobj = &mem->bo->tbo.base;
+ ret = drm_exec_prepare_obj(&exec, gobj, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unreserve_out;
+ }
+ }
+
+ ret = process_validate_vms(process_info, NULL);
+ if (ret)
+ goto unreserve_out;
+
+ /* Validate BOs and update GPUVM page tables */
+ list_for_each_entry_safe(mem, tmp_mem,
+ &process_info->userptr_inval_list,
+ validate_list) {
+ struct kfd_mem_attachment *attachment;
+
+ bo = mem->bo;
+
+ /* Validate the BO if we got user pages */
+ if (bo->tbo.ttm->pages[0]) {
+ amdgpu_bo_placement_from_domain(bo, mem->domain);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret) {
+ pr_err("%s: failed to validate BO\n", __func__);
+ goto unreserve_out;
+ }
+ }
+
+ /* Update mapping. If the BO was not validated
+ * (because we couldn't get user pages), this will
+ * clear the page table entries, which will result in
+ * VM faults if the GPU tries to access the invalid
+ * memory.
+ */
+ list_for_each_entry(attachment, &mem->attachments, list) {
+ if (!attachment->is_mapped)
+ continue;
+
+ kfd_mem_dmaunmap_attachment(mem, attachment);
+ ret = update_gpuvm_pte(mem, attachment, &sync);
+ if (ret) {
+ pr_err("%s: update PTE failed\n", __func__);
+ /* make sure this gets validated again */
+ mutex_lock(&process_info->notifier_lock);
+ mem->invalid++;
+ mutex_unlock(&process_info->notifier_lock);
+ goto unreserve_out;
+ }
+ }
+ }
+
+ /* Update page directories */
+ ret = process_update_pds(process_info, &sync);
+
+unreserve_out:
+ drm_exec_fini(&exec);
+ amdgpu_sync_wait(&sync, false);
+ amdgpu_sync_free(&sync);
+
+ return ret;
+}
+
+/* Confirm that all user pages are valid while holding the notifier lock
+ *
+ * Moves valid BOs from the userptr_inval_list back to userptr_valid_list.
+ */
+static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info)
+{
+ struct kgd_mem *mem, *tmp_mem;
+ int ret = 0;
+
+ list_for_each_entry_safe(mem, tmp_mem,
+ &process_info->userptr_inval_list,
+ validate_list) {
+ bool valid;
+
+ /* keep mem without hmm range at userptr_inval_list */
+ if (!mem->range)
+ continue;
+
+ /* Only check mem with hmm range associated */
+ valid = amdgpu_ttm_tt_get_user_pages_done(
+ mem->bo->tbo.ttm, mem->range);
+
+ mem->range = NULL;
+ if (!valid) {
+ WARN(!mem->invalid, "Invalid BO not marked invalid");
+ ret = -EAGAIN;
+ continue;
+ }
+
+ if (mem->invalid) {
+ WARN(1, "Valid BO is marked invalid");
+ ret = -EAGAIN;
+ continue;
+ }
+
+ list_move_tail(&mem->validate_list,
+ &process_info->userptr_valid_list);
+ }
+
+ return ret;
+}
+
+/* Worker callback to restore evicted userptr BOs
+ *
+ * Tries to update and validate all userptr BOs. If successful and no
+ * concurrent evictions happened, the queues are restarted. Otherwise,
+ * reschedule for another attempt later.
+ */
+static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct amdkfd_process_info *process_info =
+ container_of(dwork, struct amdkfd_process_info,
+ restore_userptr_work);
+ struct task_struct *usertask;
+ struct mm_struct *mm;
+ uint32_t evicted_bos;
+
+ mutex_lock(&process_info->notifier_lock);
+ evicted_bos = process_info->evicted_bos;
+ mutex_unlock(&process_info->notifier_lock);
+ if (!evicted_bos)
+ return;
+
+ /* Reference task and mm in case of concurrent process termination */
+ usertask = get_pid_task(process_info->pid, PIDTYPE_PID);
+ if (!usertask)
+ return;
+ mm = get_task_mm(usertask);
+ if (!mm) {
+ put_task_struct(usertask);
+ return;
+ }
+
+ mutex_lock(&process_info->lock);
+
+ if (update_invalid_user_pages(process_info, mm))
+ goto unlock_out;
+ /* userptr_inval_list can be empty if all evicted userptr BOs
+ * have been freed. In that case there is nothing to validate
+ * and we can just restart the queues.
+ */
+ if (!list_empty(&process_info->userptr_inval_list)) {
+ if (validate_invalid_user_pages(process_info))
+ goto unlock_out;
+ }
+ /* Final check for concurrent eviction and atomic update. If
+ * another eviction happens after successful update, it will
+ * be a first eviction that calls quiesce_mm. The eviction
+ * reference counting inside KFD will handle this case.
+ */
+ mutex_lock(&process_info->notifier_lock);
+ if (process_info->evicted_bos != evicted_bos)
+ goto unlock_notifier_out;
+
+ if (confirm_valid_user_pages_locked(process_info)) {
+ WARN(1, "User pages unexpectedly invalid");
+ goto unlock_notifier_out;
+ }
+
+ process_info->evicted_bos = evicted_bos = 0;
+
+ if (kgd2kfd_resume_mm(mm)) {
+ pr_err("%s: Failed to resume KFD\n", __func__);
+ /* No recovery from this failure. Probably the CP is
+ * hanging. No point trying again.
+ */
+ }
+
+unlock_notifier_out:
+ mutex_unlock(&process_info->notifier_lock);
+unlock_out:
+ mutex_unlock(&process_info->lock);
+
+ /* If validation failed, reschedule another attempt */
+ if (evicted_bos) {
+ queue_delayed_work(system_freezable_wq,
+ &process_info->restore_userptr_work,
+ msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
+
+ kfd_smi_event_queue_restore_rescheduled(mm);
+ }
+ mmput(mm);
+ put_task_struct(usertask);
+}
+
+static void replace_eviction_fence(struct dma_fence __rcu **ef,
+ struct dma_fence *new_ef)
+{
+ struct dma_fence *old_ef = rcu_replace_pointer(*ef, new_ef, true
+ /* protected by process_info->lock */);
+
+ /* If we're replacing an unsignaled eviction fence, that fence will
+ * never be signaled, and if anyone is still waiting on that fence,
+ * they will hang forever. This should never happen. We should only
+ * replace the fence in restore_work, which only gets scheduled after
+ * the eviction work has signaled the fence.
+ */
+ WARN_ONCE(!dma_fence_is_signaled(old_ef),
+ "Replacing unsignaled eviction fence");
+ dma_fence_put(old_ef);
+}
+
+/** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given
+ * KFD process identified by process_info
+ *
+ * @process_info: amdkfd_process_info of the KFD process
+ *
+ * After memory eviction, the restore thread calls this function. It should be
+ * called while the process is still valid. BO restore involves:
+ *
+ * 1. Release the old eviction fence and create a new one
+ * 2. Get two copies of the PD BO list from all the VMs. Keep one copy as pd_list.
+ * 3. Use the second PD list and kfd_bo_list to create a list (ctx.list) of
+ * BOs that need to be reserved.
+ * 4. Reserve all the BOs
+ * 5. Validate the PD and PT BOs.
+ * 6. Validate all KFD BOs using kfd_bo_list, map them and add a new fence
+ * 7. Add the fence to all PD and PT BOs.
+ * 8. Unreserve all BOs
+ */
+int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu **ef)
+{
+ struct amdkfd_process_info *process_info = info;
+ struct amdgpu_vm *peer_vm;
+ struct kgd_mem *mem;
+ struct list_head duplicate_save;
+ struct amdgpu_sync sync_obj;
+ unsigned long failed_size = 0;
+ unsigned long total_size = 0;
+ struct drm_exec exec;
+ int ret;
+
+ INIT_LIST_HEAD(&duplicate_save);
+
+ mutex_lock(&process_info->lock);
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret)) {
+ pr_err("Locking VM PD failed, ret: %d\n", ret);
+ goto ttm_reserve_fail;
+ }
+ }
+
+ /* Reserve all BOs and page tables/directory. Add all BOs from
+ * kfd_bo_list to ctx.list
+ */
+ list_for_each_entry(mem, &process_info->kfd_bo_list,
+ validate_list) {
+ struct drm_gem_object *gobj;
+
+ gobj = &mem->bo->tbo.base;
+ ret = drm_exec_prepare_obj(&exec, gobj, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret)) {
+ pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret);
+ goto ttm_reserve_fail;
+ }
+ }
+ }
+
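+ /* sync_obj collects validation and PT update fences to wait on before fencing the BOs */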
+ amdgpu_sync_create(&sync_obj);
+
+ /* Validate BOs managed by KFD */
+ list_for_each_entry(mem, &process_info->kfd_bo_list,
+ validate_list) {
+
+ struct amdgpu_bo *bo = mem->bo;
+ uint32_t domain = mem->domain;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+
+ total_size += amdgpu_bo_size(bo);
+
+ ret = amdgpu_amdkfd_bo_validate(bo, domain, false);
+ if (ret) {
+ pr_debug("Memory eviction: Validate BOs failed\n");
+ failed_size += amdgpu_bo_size(bo);
+ ret = amdgpu_amdkfd_bo_validate(bo,
+ AMDGPU_GEM_DOMAIN_GTT, false);
+ if (ret) {
+ pr_debug("Memory eviction: Try again\n");
+ goto validate_map_fail;
+ }
+ }
+ dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
+ DMA_RESV_USAGE_KERNEL, fence) {
+ ret = amdgpu_sync_fence(&sync_obj, fence, GFP_KERNEL);
+ if (ret) {
+ pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
+ goto validate_map_fail;
+ }
+ }
+ }
+
+ if (failed_size)
+ pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
+
+ /* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
+ * validations above would invalidate DMABuf imports again.
+ */
+ ret = process_validate_vms(process_info, &exec.ticket);
+ if (ret) {
+ pr_debug("Validating VMs failed, ret: %d\n", ret);
+ goto validate_map_fail;
+ }
+
+ /* Update mappings managed by KFD. */
+ list_for_each_entry(mem, &process_info->kfd_bo_list,
+ validate_list) {
+ struct kfd_mem_attachment *attachment;
+
+ list_for_each_entry(attachment, &mem->attachments, list) {
+ if (!attachment->is_mapped)
+ continue;
+
+ kfd_mem_dmaunmap_attachment(mem, attachment);
+ ret = update_gpuvm_pte(mem, attachment, &sync_obj);
+ if (ret) {
+ pr_debug("Memory eviction: update PTE failed. Try again\n");
+ goto validate_map_fail;
+ }
+ }
+ }
+
+ /* Update mappings not managed by KFD */
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ struct amdgpu_device *adev = amdgpu_ttm_adev(
+ peer_vm->root.bo->tbo.bdev);
+
+ struct amdgpu_fpriv *fpriv =
+ container_of(peer_vm, struct amdgpu_fpriv, vm);
+
+ ret = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
+ if (ret) {
+ dev_dbg(adev->dev,
+ "Memory eviction: handle PRT moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
+ goto validate_map_fail;
+ }
+
+ ret = amdgpu_vm_handle_moved(adev, peer_vm, &exec.ticket);
+ if (ret) {
+ dev_dbg(adev->dev,
+ "Memory eviction: handle moved failed, pid %8d. Try again.\n",
+ pid_nr(process_info->pid));
+ goto validate_map_fail;
+ }
+ }
+
+ /* Update page directories */
+ ret = process_update_pds(process_info, &sync_obj);
+ if (ret) {
+ pr_debug("Memory eviction: update PDs failed. Try again\n");
+ goto validate_map_fail;
+ }
+
+ /* Sync with fences on all the page tables. They implicitly depend on any
+ * move fences from amdgpu_vm_handle_moved above.
+ */
+ ret = process_sync_pds_resv(process_info, &sync_obj);
+ if (ret) {
+ pr_debug("Memory eviction: Failed to sync to PD BO moving fence. Try again\n");
+ goto validate_map_fail;
+ }
+
+ /* Wait for validate and PT updates to finish */
+ amdgpu_sync_wait(&sync_obj, false);
+
+ /* The old eviction fence may be unsignaled if restore happens
+ * after a GPU reset or suspend/resume. Keep the old fence in that
+ * case. Otherwise release the old eviction fence and create a new
+ * one, because a fence only goes from unsignaled to signaled once
+ * and cannot be reused. Use the context and mm from the old fence.
+ *
+ * If an old eviction fence signals after this check, that's OK.
+ * Anyone signaling an eviction fence must stop the queues first
+ * and schedule another restore worker.
+ */
+ if (dma_fence_is_signaled(&process_info->eviction_fence->base)) {
+ struct amdgpu_amdkfd_fence *new_fence =
+ amdgpu_amdkfd_fence_create(
+ process_info->eviction_fence->base.context,
+ process_info->eviction_fence->mm,
+ NULL);
+
+ if (!new_fence) {
+ pr_err("Failed to create eviction fence\n");
+ ret = -ENOMEM;
+ goto validate_map_fail;
+ }
+ dma_fence_put(&process_info->eviction_fence->base);
+ process_info->eviction_fence = new_fence;
+ replace_eviction_fence(ef, dma_fence_get(&new_fence->base));
+ } else {
+ WARN_ONCE(*ef != &process_info->eviction_fence->base,
+ "KFD eviction fence doesn't match KGD process_info");
+ }
+
+ /* Attach new eviction fence to all BOs except pinned ones */
+ list_for_each_entry(mem, &process_info->kfd_bo_list, validate_list) {
+ if (mem->bo->tbo.pin_count)
+ continue;
+
+ dma_resv_add_fence(mem->bo->tbo.base.resv,
+ &process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+ /* Attach eviction fence to PD / PT BOs and DMABuf imports */
+ list_for_each_entry(peer_vm, &process_info->vm_list_head,
+ vm_list_node) {
+ struct amdgpu_bo *bo = peer_vm->root.bo;
+
+ dma_resv_add_fence(bo->tbo.base.resv,
+ &process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+
+validate_map_fail:
+ amdgpu_sync_free(&sync_obj);
+ttm_reserve_fail:
+ drm_exec_fini(&exec);
+ mutex_unlock(&process_info->lock);
+ return ret;
+}
+
+int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem)
+{
+ struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
+ struct amdgpu_bo *gws_bo = (struct amdgpu_bo *)gws;
+ int ret;
+
+ if (!info || !gws)
+ return -EINVAL;
+
+ *mem = kzalloc(sizeof(struct kgd_mem), GFP_KERNEL);
+ if (!*mem)
+ return -ENOMEM;
+
+ mutex_init(&(*mem)->lock);
+ INIT_LIST_HEAD(&(*mem)->attachments);
+ (*mem)->bo = amdgpu_bo_ref(gws_bo);
+ (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS;
+ (*mem)->process_info = process_info;
+ add_kgd_mem_to_kfd_bo_list(*mem, process_info, false);
+ amdgpu_sync_create(&(*mem)->sync);
+
+
+ /* Validate gws bo the first time it is added to process */
+ mutex_lock(&(*mem)->process_info->lock);
+ ret = amdgpu_bo_reserve(gws_bo, false);
+ if (unlikely(ret)) {
+ pr_err("Reserve gws bo failed %d\n", ret);
+ goto bo_reservation_failure;
+ }
+
+ ret = amdgpu_amdkfd_bo_validate(gws_bo, AMDGPU_GEM_DOMAIN_GWS, true);
+ if (ret) {
+ pr_err("GWS BO validate failed %d\n", ret);
+ goto bo_validation_failure;
+ }
+ /* The GWS resource is shared between amdgpu and amdkfd.
+ * Add the process eviction fence to the BO so they can
+ * evict each other.
+ */
+ ret = dma_resv_reserve_fences(gws_bo->tbo.base.resv, 1);
+ if (ret)
+ goto reserve_shared_fail;
+ dma_resv_add_fence(gws_bo->tbo.base.resv,
+ &process_info->eviction_fence->base,
+ DMA_RESV_USAGE_BOOKKEEP);
+ amdgpu_bo_unreserve(gws_bo);
+ mutex_unlock(&(*mem)->process_info->lock);
+
+ return ret;
+
+reserve_shared_fail:
+bo_validation_failure:
+ amdgpu_bo_unreserve(gws_bo);
+bo_reservation_failure:
+ mutex_unlock(&(*mem)->process_info->lock);
+ amdgpu_sync_free(&(*mem)->sync);
+ remove_kgd_mem_from_kfd_bo_list(*mem, process_info);
+ amdgpu_bo_unref(&gws_bo);
+ mutex_destroy(&(*mem)->lock);
+ kfree(*mem);
+ *mem = NULL;
+ return ret;
+}
+
+int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
+{
+ int ret;
+ struct amdkfd_process_info *process_info = (struct amdkfd_process_info *)info;
+ struct kgd_mem *kgd_mem = (struct kgd_mem *)mem;
+ struct amdgpu_bo *gws_bo = kgd_mem->bo;
+
+ /* Remove BO from process's validate list so restore worker won't touch
+ * it anymore
+ */
+ remove_kgd_mem_from_kfd_bo_list(kgd_mem, process_info);
+
+ ret = amdgpu_bo_reserve(gws_bo, false);
+ if (unlikely(ret)) {
+ pr_err("Reserve gws bo failed %d\n", ret);
+ //TODO add BO back to validate_list?
+ return ret;
+ }
+ amdgpu_amdkfd_remove_eviction_fence(gws_bo,
+ process_info->eviction_fence);
+ amdgpu_bo_unreserve(gws_bo);
+ amdgpu_sync_free(&kgd_mem->sync);
+ amdgpu_bo_unref(&gws_bo);
+ mutex_destroy(&kgd_mem->lock);
+ kfree(mem);
+ return 0;
+}
+
+/* Returns GPU-specific tiling mode information */
+int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
+ struct tile_config *config)
+{
+ config->gb_addr_config = adev->gfx.config.gb_addr_config;
+ config->tile_config_ptr = adev->gfx.config.tile_mode_array;
+ config->num_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+ config->macro_tile_config_ptr =
+ adev->gfx.config.macrotile_mode_array;
+ config->num_macro_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+
+ /* Those values are not set from GFX9 onwards */
+ config->num_banks = adev->gfx.config.num_banks;
+ config->num_ranks = adev->gfx.config.num_ranks;
+
+ return 0;
+}
+
+bool amdgpu_amdkfd_bo_mapped_to_dev(void *drm_priv, struct kgd_mem *mem)
+{
+ struct amdgpu_vm *vm = drm_priv_to_vm(drm_priv);
+ struct kfd_mem_attachment *entry;
+
+ list_for_each_entry(entry, &mem->attachments, list) {
+ if (entry->is_mapped && entry->bo_va->base.vm == vm)
+ return true;
+ }
+ return false;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data)
+{
+
+ spin_lock(&kfd_mem_limit.mem_limit_lock);
+ seq_printf(m, "System mem used %lldM out of %lluM\n",
+ (kfd_mem_limit.system_mem_used >> 20),
+ (kfd_mem_limit.max_system_mem_limit >> 20));
+ seq_printf(m, "TTM mem used %lldM out of %lluM\n",
+ (kfd_mem_limit.ttm_mem_used >> 20),
+ (kfd_mem_limit.max_ttm_mem_limit >> 20));
+ spin_unlock(&kfd_mem_limit.mem_limit_lock);
+
+ return 0;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 1cf78f4dd339..763f2b8dcf13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -23,24 +23,19 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
+#include "amdgpu_atomfirmware.h"
#include "amdgpu_i2c.h"
+#include "amdgpu_display.h"
#include "atom.h"
#include "atom-bits.h"
#include "atombios_encoders.h"
#include "bif/bif_4_1_d.h"
-static void amdgpu_atombios_lookup_i2c_gpio_quirks(struct amdgpu_device *adev,
- ATOM_GPIO_I2C_ASSIGMENT *gpio,
- u8 index)
-{
-
-}
-
static struct amdgpu_i2c_bus_rec amdgpu_atombios_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
{
struct amdgpu_i2c_bus_rec i2c;
@@ -106,9 +101,6 @@ struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *
gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
-
- amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);
-
if (gpio->sucI2cId.ucAccess == id) {
i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
break;
@@ -140,13 +132,43 @@ void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
- amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);
-
i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
if (i2c.valid) {
sprintf(stmp, "0x%x", i2c.i2c_id);
- adev->i2c_bus[i] = amdgpu_i2c_create(adev->ddev, &i2c, stmp);
+ adev->i2c_bus[i] = amdgpu_i2c_create(adev_to_drm(adev), &i2c, stmp);
+ }
+ gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
+ ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
+ }
+ }
+}
+
+void amdgpu_atombios_oem_i2c_init(struct amdgpu_device *adev, u8 i2c_id)
+{
+ struct atom_context *ctx = adev->mode_info.atom_context;
+ ATOM_GPIO_I2C_ASSIGMENT *gpio;
+ struct amdgpu_i2c_bus_rec i2c;
+ int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+ struct _ATOM_GPIO_I2C_INFO *i2c_info;
+ uint16_t data_offset, size;
+ int i, num_indices;
+ char stmp[32];
+
+ if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+ i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+ gpio = &i2c_info->asGPIO_Info[0];
+ for (i = 0; i < num_indices; i++) {
+ i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
+
+ if (i2c.valid && i2c.i2c_id == i2c_id) {
+ sprintf(stmp, "OEM 0x%x", i2c.i2c_id);
+ adev->i2c_bus[i] = amdgpu_i2c_create(adev_to_drm(adev), &i2c, stmp);
+ break;
}
gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
@@ -336,17 +358,9 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
path_size += le16_to_cpu(path->usSize);
if (device_support & le16_to_cpu(path->usDeviceTag)) {
- uint8_t con_obj_id, con_obj_num, con_obj_type;
-
- con_obj_id =
+ uint8_t con_obj_id =
(le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
>> OBJECT_ID_SHIFT;
- con_obj_num =
- (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
- >> ENUM_ID_SHIFT;
- con_obj_type =
- (le16_to_cpu(path->usConnObjectId) &
- OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
/* Skip TV/CV support */
if ((le16_to_cpu(path->usDeviceTag) ==
@@ -371,15 +385,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
router.ddc_valid = false;
router.cd_valid = false;
for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
- uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
-
- grph_obj_id =
- (le16_to_cpu(path->usGraphicObjIds[j]) &
- OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- grph_obj_num =
- (le16_to_cpu(path->usGraphicObjIds[j]) &
- ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- grph_obj_type =
+ uint8_t grph_obj_type =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
@@ -555,7 +561,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
}
}
- amdgpu_link_encoder_connector(adev->ddev);
+ amdgpu_link_encoder_connector(adev_to_drm(adev));
return true;
}
@@ -690,13 +696,16 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
/* set a reasonable default for DP */
if (adev->clock.default_dispclk < 53900) {
- DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
- adev->clock.default_dispclk / 100);
+ DRM_DEBUG("Changing default dispclk from %dMhz to 600Mhz\n",
+ adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 60000;
+ } else if (adev->clock.default_dispclk <= 60000) {
+ DRM_DEBUG("Changing default dispclk from %dMhz to 625Mhz\n",
+ adev->clock.default_dispclk / 100);
+ adev->clock.default_dispclk = 62500;
}
adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
- adev->clock.current_dispclk = adev->clock.default_dispclk;
adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
if (adev->clock.max_pixel_clock == 0)
@@ -1028,7 +1037,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->post_div = args.v3.ucPostDiv;
dividers->enable_post_div = (args.v3.ucCntlFlag &
@@ -1048,7 +1059,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
if (strobe_mode)
args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->post_div = args.v5.ucPostDiv;
dividers->enable_post_div = (args.v5.ucCntlFlag &
@@ -1066,7 +1079,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
/* fusion */
args.v4.ulClock = cpu_to_le32(clock); /* 10 khz */
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
dividers->real_clock = le32_to_cpu(args.v4.ulClock);
@@ -1077,7 +1092,9 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
args.v6_in.ulClock.ulComputeClockFlag = clock_type;
args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock); /* 10 khz */
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
@@ -1093,6 +1110,7 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
return 0;
}
+#ifdef CONFIG_DRM_AMDGPU_SI
int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
u32 clock,
bool strobe_mode,
@@ -1118,7 +1136,9 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
if (strobe_mode)
args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
@@ -1144,8 +1164,8 @@ int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
return 0;
}
-void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
- u32 eng_clock, u32 mem_clock)
+int amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
+ u32 eng_clock, u32 mem_clock)
{
SET_ENGINE_CLOCK_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
@@ -1160,7 +1180,8 @@ void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
if (mem_clock)
args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ return amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
@@ -1214,7 +1235,9 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
args.v2.ucVoltageMode = 0;
args.v2.usVoltageLevel = 0;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
*voltage = le16_to_cpu(args.v2.usVoltageLevel);
break;
@@ -1223,7 +1246,9 @@ int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
+ index, (uint32_t *)&args, sizeof(args)))
+ return -EINVAL;
*voltage = le16_to_cpu(args.v3.usVoltageLevel);
break;
@@ -1242,157 +1267,6 @@ int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *
return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}
-int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
- u16 *leakage_id)
-{
- union set_voltage args;
- int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
- u8 frev, crev;
-
- if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
- return -EINVAL;
-
- switch (crev) {
- case 3:
- case 4:
- args.v3.ucVoltageType = 0;
- args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
- args.v3.usVoltageLevel = 0;
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-
- *leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
- u16 *vddc, u16 *vddci,
- u16 virtual_voltage_id,
- u16 vbios_voltage_id)
-{
- int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
- u8 frev, crev;
- u16 data_offset, size;
- int i, j;
- ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
- u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
-
- *vddc = 0;
- *vddci = 0;
-
- if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset))
- return -EINVAL;
-
- profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
- (adev->mode_info.atom_context->bios + data_offset);
-
- switch (frev) {
- case 1:
- return -EINVAL;
- case 2:
- switch (crev) {
- case 1:
- if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
- return -EINVAL;
- leakage_bin = (u16 *)
- (adev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usLeakageBinArrayOffset));
- vddc_id_buf = (u16 *)
- (adev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
- vddc_buf = (u16 *)
- (adev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
- vddci_id_buf = (u16 *)
- (adev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
- vddci_buf = (u16 *)
- (adev->mode_info.atom_context->bios + data_offset +
- le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));
-
- if (profile->ucElbVDDC_Num > 0) {
- for (i = 0; i < profile->ucElbVDDC_Num; i++) {
- if (vddc_id_buf[i] == virtual_voltage_id) {
- for (j = 0; j < profile->ucLeakageBinNum; j++) {
- if (vbios_voltage_id <= leakage_bin[j]) {
- *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
- break;
- }
- }
- break;
- }
- }
- }
- if (profile->ucElbVDDCI_Num > 0) {
- for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
- if (vddci_id_buf[i] == virtual_voltage_id) {
- for (j = 0; j < profile->ucLeakageBinNum; j++) {
- if (vbios_voltage_id <= leakage_bin[j]) {
- *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
- break;
- }
- }
- break;
- }
- }
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
- break;
- default:
- DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
- return -EINVAL;
- }
-
- return 0;
-}
-
-union get_voltage_info {
- struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
- struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
-};
-
-int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
- u16 virtual_voltage_id,
- u16 *voltage)
-{
- int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
- u32 entry_id;
- u32 count = adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
- union get_voltage_info args;
-
- for (entry_id = 0; entry_id < count; entry_id++) {
- if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
- virtual_voltage_id)
- break;
- }
-
- if (entry_id >= count)
- return -EINVAL;
-
- args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
- args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
- args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
- args.in.ulSCLKFreq =
- cpu_to_le32(adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);
-
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
-
- *voltage = le16_to_cpu(args.evv_out.usVoltageLevel);
-
- return 0;
-}
-
union voltage_object_info {
struct _ATOM_VOLTAGE_OBJECT_INFO v1;
struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
@@ -1411,7 +1285,7 @@ static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOL
{
u32 size = le16_to_cpu(v3->sHeader.usStructureSize);
u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
- u8 *start = (u8*)v3;
+ u8 *start = (u8 *)v3;
while (offset < size) {
ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
@@ -1636,6 +1510,8 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
(u32)le32_to_cpu(*((u32 *)reg_data + j));
j++;
} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
+ if (i == 0)
+ continue;
reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
}
@@ -1664,6 +1540,7 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
}
return -EINVAL;
}
+#endif
bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev)
{
@@ -1682,7 +1559,7 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
{
uint32_t bios_6_scratch;
- bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);
+ bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6);
if (lock) {
bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
@@ -1692,15 +1569,17 @@ void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
bios_6_scratch |= ATOM_S6_ACC_MODE;
}
- WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
+ WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}
-void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
+static void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
{
uint32_t bios_2_scratch, bios_6_scratch;
- bios_2_scratch = RREG32(mmBIOS_SCRATCH_2);
- bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);
+ adev->bios_scratch_reg_offset = mmBIOS_SCRATCH_0;
+
+ bios_2_scratch = RREG32(adev->bios_scratch_reg_offset + 2);
+ bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6);
/* let the bios control the backlight */
bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;
@@ -1711,86 +1590,86 @@ void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
/* clear the vbios dpms state */
bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
- WREG32(mmBIOS_SCRATCH_2, bios_2_scratch);
- WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
+ WREG32(adev->bios_scratch_reg_offset + 2, bios_2_scratch);
+ WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}
-void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
+void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
+ bool hung)
{
- int i;
+ u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3);
+
+ if (hung)
+ tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ else
+ tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- adev->bios_scratch[i] = RREG32(mmBIOS_SCRATCH_0 + i);
+ WREG32(adev->bios_scratch_reg_offset + 3, tmp);
}
-void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
+void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev,
+ u32 backlight_level)
{
- int i;
+ u32 tmp = RREG32(adev->bios_scratch_reg_offset + 2);
- /*
- * VBIOS will check ASIC_INIT_COMPLETE bit to decide if
- * execute ASIC_Init posting via driver
- */
- adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
+ tmp &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+ tmp |= (backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
+ ATOM_S2_CURRENT_BL_LEVEL_MASK;
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
+ WREG32(adev->bios_scratch_reg_offset + 2, tmp);
}
-void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
- bool hung)
+bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(mmBIOS_SCRATCH_3);
+ u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7);
- if (hung)
- tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ if (tmp & ATOM_S7_ASIC_INIT_COMPLETE_MASK)
+ return false;
else
- tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
-
- WREG32(mmBIOS_SCRATCH_3, tmp);
+ return true;
}
-/* Atom needs data in little endian format
- * so swap as appropriate when copying data to
- * or from atom. Note that atom operates on
- * dw units.
+/* Atom needs data in little endian format so swap as appropriate when copying
+ * data to or from atom. Note that atom operates on dw units.
+ *
+ * Use to_le=true when sending data to atom and provide at least
+ * ALIGN(num_bytes,4) bytes in the dst buffer.
+ *
+ * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
+ * bytes in the src buffer.
*/
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
- u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
- u32 *dst32, *src32;
+ u32 src_tmp[5], dst_tmp[5];
int i;
+ u8 align_num_bytes = ALIGN(num_bytes, 4);
- memcpy(src_tmp, src, num_bytes);
- src32 = (u32 *)src_tmp;
- dst32 = (u32 *)dst_tmp;
if (to_le) {
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = cpu_to_le32(src32[i]);
- memcpy(dst, dst_tmp, num_bytes);
+ memcpy(src_tmp, src, num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = cpu_to_le32(src_tmp[i]);
+ memcpy(dst, dst_tmp, align_num_bytes);
} else {
- u8 dws = num_bytes & ~3;
- for (i = 0; i < ((num_bytes + 3) / 4); i++)
- dst32[i] = le32_to_cpu(src32[i]);
- memcpy(dst, dst_tmp, dws);
- if (num_bytes % 4) {
- for (i = 0; i < (num_bytes % 4); i++)
- dst[dws+i] = dst_tmp[dws+i];
- }
+ memcpy(src_tmp, src, align_num_bytes);
+ for (i = 0; i < align_num_bytes / 4; i++)
+ dst_tmp[i] = le32_to_cpu(src_tmp[i]);
+ memcpy(dst, dst_tmp, num_bytes);
}
#else
memcpy(dst, src, num_bytes);
#endif
}
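
The reworked helper above makes the dword-granular behaviour explicit: callers must size both buffers to ALIGN(num_bytes, 4). A minimal illustrative sketch of a caller honouring that contract follows; it is not part of the patch, and the 17-byte payload is an arbitrary example.

/* Sketch only: read a 17-byte payload out of an atom-provided buffer.
 * Both buffers are padded to ALIGN(17, 4) = 20 bytes because the helper
 * copies whole dwords on big-endian hosts. */
static void example_read_from_atom(u8 *atom_buf)
{
	u8 out[ALIGN(17, 4)];	/* 20 bytes of room for a 17-byte payload */

	/* to_le = false: data received from atom, convert to CPU order */
	amdgpu_atombios_copy_swap(out, atom_buf, 17, false);
}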
-int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
+static int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
{
struct atom_context *ctx = adev->mode_info.atom_context;
int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
uint16_t data_offset;
int usage_bytes = 0;
struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+ u64 start_addr;
+ u64 size;
if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
@@ -1799,7 +1678,21 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
- usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+ start_addr = firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware;
+ size = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb;
+
+ if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
+ (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
+ ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
+ /* Firmware requests VRAM reservation for SR-IOV */
+ adev->mman.fw_vram_usage_start_offset = (start_addr &
+ (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+ adev->mman.fw_vram_usage_size = size << 10;
+ /* Use the default scratch size */
+ usage_bytes = 0;
+ } else {
+ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
+ }
}
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
@@ -1811,3 +1704,248 @@ int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
ctx->scratch_size_bytes = usage_bytes;
return 0;
}
+
+/* ATOM accessor methods */
+/*
+ * ATOM is an interpreted byte code stored in tables in the vbios. The
+ * driver registers callbacks to access registers and the interpreter
+ * in the driver parses the tables and executes then to program specific
+ * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
+ * atombios.h, and atom.c
+ */
+
+/**
+ * cail_pll_read - read PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the PLL register.
+ */
+static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
+{
+ return 0;
+}
+
+/**
+ * cail_pll_write - write PLL register
+ *
+ * @info: atom card_info pointer
+ * @reg: PLL register offset
+ * @val: value to write to the pll register
+ *
+ * Provides a PLL register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+
+}
+
+/**
+ * cail_mc_read - read MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MC register.
+ */
+static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
+{
+ return 0;
+}
+
+/**
+ * cail_mc_write - write MC (Memory Controller) register
+ *
+ * @info: atom card_info pointer
+ * @reg: MC register offset
+ * @val: value to write to the MC register
+ *
+ * Provides an MC register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+
+}
+
+/**
+ * cail_reg_write - write MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ * @val: value to write to the MMIO register
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ */
+static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct amdgpu_device *adev = drm_to_adev(info->dev);
+
+ WREG32(reg, val);
+}
+
+/**
+ * cail_reg_read - read MMIO register
+ *
+ * @info: atom card_info pointer
+ * @reg: MMIO register offset
+ *
+ * Provides an MMIO register accessor for the atom interpreter (r4xx+).
+ * Returns the value of the MMIO register.
+ */
+static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
+{
+ struct amdgpu_device *adev = drm_to_adev(info->dev);
+ uint32_t r;
+
+ r = RREG32(reg);
+ return r;
+}
+
+static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ return sysfs_emit(buf, "%s\n", ctx->vbios_pn);
+}
+
+static ssize_t amdgpu_atombios_get_vbios_build(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ return sysfs_emit(buf, "%s\n", ctx->build_num);
+}
+
+static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
+ NULL);
+static DEVICE_ATTR(vbios_build, 0444, amdgpu_atombios_get_vbios_build, NULL);
+
+static struct attribute *amdgpu_vbios_version_attrs[] = {
+ &dev_attr_vbios_version.attr, &dev_attr_vbios_build.attr, NULL
+};
+
+static umode_t amdgpu_vbios_version_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int index)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ if (attr == &dev_attr_vbios_build.attr && !strlen(ctx->build_num))
+ return 0;
+
+ return attr->mode;
+}
+
+const struct attribute_group amdgpu_vbios_version_attr_group = {
+ .attrs = amdgpu_vbios_version_attrs,
+ .is_visible = amdgpu_vbios_version_attrs_is_visible,
+};
+
+int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev)
+{
+ if (adev->mode_info.atom_context)
+ return devm_device_add_group(adev->dev,
+ &amdgpu_vbios_version_attr_group);
+
+ return 0;
+}
+
+/**
+ * amdgpu_atombios_fini - free the driver info and callbacks for atombios
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Frees the driver info and register access callbacks for the ATOM
+ * interpreter (r4xx+).
+ * Called at driver shutdown.
+ */
+void amdgpu_atombios_fini(struct amdgpu_device *adev)
+{
+ if (adev->mode_info.atom_context) {
+ kfree(adev->mode_info.atom_context->scratch);
+ kfree(adev->mode_info.atom_context->iio);
+ }
+ kfree(adev->mode_info.atom_context);
+ adev->mode_info.atom_context = NULL;
+ kfree(adev->mode_info.atom_card_info);
+ adev->mode_info.atom_card_info = NULL;
+}
+
+/**
+ * amdgpu_atombios_init - init the driver info and callbacks for atombios
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initializes the driver info and register access callbacks for the
+ * ATOM interpreter (r4xx+).
+ * Returns 0 on success, -ENOMEM on failure.
+ * Called at driver startup.
+ */
+int amdgpu_atombios_init(struct amdgpu_device *adev)
+{
+ struct card_info *atom_card_info =
+ kzalloc(sizeof(struct card_info), GFP_KERNEL);
+
+ if (!atom_card_info)
+ return -ENOMEM;
+
+ adev->mode_info.atom_card_info = atom_card_info;
+ atom_card_info->dev = adev_to_drm(adev);
+ atom_card_info->reg_read = cail_reg_read;
+ atom_card_info->reg_write = cail_reg_write;
+ atom_card_info->mc_read = cail_mc_read;
+ atom_card_info->mc_write = cail_mc_write;
+ atom_card_info->pll_read = cail_pll_read;
+ atom_card_info->pll_write = cail_pll_write;
+
+ adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
+ if (!adev->mode_info.atom_context) {
+ amdgpu_atombios_fini(adev);
+ return -ENOMEM;
+ }
+
+ mutex_init(&adev->mode_info.atom_context->mutex);
+ if (adev->is_atom_fw) {
+ amdgpu_atomfirmware_scratch_regs_init(adev);
+ amdgpu_atomfirmware_allocate_fb_scratch(adev);
+ /* cache firmware_flags for later use */
+ adev->mode_info.firmware_flags =
+ amdgpu_atomfirmware_query_firmware_capability(adev);
+ } else {
+ amdgpu_atombios_scratch_regs_init(adev);
+ amdgpu_atombios_allocate_fb_scratch(adev);
+ }
+
+ return 0;
+}
+
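
The init/fini pair above is intended to bracket the lifetime of the ATOM context. A hypothetical probe-time caller (illustration only, not part of the patch) would pair them like this:

/* Sketch only: set up the ATOM context at probe and unwind on failure. */
static int example_probe_atombios(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_atombios_init(adev);
	if (r)
		return r;

	r = amdgpu_atombios_sysfs_init(adev);
	if (r)
		amdgpu_atombios_fini(adev);

	return r;
}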
+int amdgpu_atombios_get_data_table(struct amdgpu_device *adev,
+ uint32_t table,
+ uint16_t *size,
+ uint8_t *frev,
+ uint8_t *crev,
+ uint8_t **addr)
+{
+ uint16_t data_start;
+
+ if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
+ size, frev, crev, &data_start))
+ return -EINVAL;
+
+ *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
+
+ return 0;
+}
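
For illustration, a caller of the new amdgpu_atombios_get_data_table() helper might look up a legacy data table and log its revision. This sketch is not part of the patch; the chosen table (FirmwareInfo) and the debug output are only examples.

/* Sketch only: fetch the FirmwareInfo data table through the new helper. */
static int example_dump_firmware_info(struct amdgpu_device *adev)
{
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	uint16_t size;
	uint8_t frev, crev;
	uint8_t *table;

	if (amdgpu_atombios_get_data_table(adev, index, &size,
					   &frev, &crev, &table))
		return -EINVAL;

	DRM_DEBUG("FirmwareInfo table: %u bytes, rev %u.%u\n",
		  size, frev, crev);
	return 0;
}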
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 38d0fe32e5cd..867bc5c5ce67 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -89,8 +89,7 @@ struct atom_memory_info {
#define MAX_AC_TIMING_ENTRIES 16
-struct atom_memory_clock_range_table
-{
+struct atom_memory_clock_range_table {
u8 num_entries;
u8 rsv[3];
u32 mclk[MAX_AC_TIMING_ENTRIES];
@@ -118,14 +117,12 @@ struct atom_mc_reg_table {
#define MAX_VOLTAGE_ENTRIES 32
-struct atom_voltage_table_entry
-{
+struct atom_voltage_table_entry {
u16 value;
u32 smio_low;
};
-struct atom_voltage_table
-{
+struct atom_voltage_table {
u32 count;
u32 mask_low;
u32 phase_delay;
@@ -139,6 +136,7 @@ amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev,
uint8_t id);
void amdgpu_atombios_i2c_init(struct amdgpu_device *adev);
+void amdgpu_atombios_oem_i2c_init(struct amdgpu_device *adev, u8 i2c_id);
bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev);
@@ -160,25 +158,14 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
bool strobe_mode,
struct atom_clock_dividers *dividers);
+#ifdef CONFIG_DRM_AMDGPU_SI
int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
u32 clock,
bool strobe_mode,
struct atom_mpll_param *mpll_param);
-void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
- u32 eng_clock, u32 mem_clock);
-
-int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
- u16 *leakage_id);
-
-int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
- u16 *vddc, u16 *vddci,
- u16 virtual_voltage_id,
- u16 vbios_voltage_id);
-
-int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
- u16 virtual_voltage_id,
- u16 *voltage);
+int amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
+ u32 eng_clock, u32 mem_clock);
bool
amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
@@ -191,33 +178,43 @@ int amdgpu_atombios_get_voltage_table(struct amdgpu_device *adev,
int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
u8 module_index,
struct atom_mc_reg_table *reg_table);
+int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
+ u16 voltage_id, u16 *voltage);
+int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
+ u16 *voltage,
+ u16 leakage_idx);
+void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
+ u16 *vddc, u16 *vddci, u16 *mvdd);
+int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
+ u8 voltage_type,
+ u8 *svd_gpio_id, u8 *svc_gpio_id);
+#endif
bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
-void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev);
-void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev);
-void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev);
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
bool hung);
+void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev,
+ u32 backlight_level);
+bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
-int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
- u16 voltage_id, u16 *voltage);
-int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
- u16 *voltage,
- u16 leakage_idx);
-void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
- u16 *vddc, u16 *vddci, u16 *mvdd);
int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
u8 clock_type,
u32 clock,
bool strobe_mode,
struct atom_clock_dividers *dividers);
-int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
- u8 voltage_type,
- u8 *svd_gpio_id, u8 *svc_gpio_id);
-int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev);
+int amdgpu_atombios_get_data_table(struct amdgpu_device *adev,
+ uint32_t table,
+ uint16_t *size,
+ uint8_t *frev,
+ uint8_t *crev,
+ uint8_t **addr);
+
+void amdgpu_atombios_fini(struct amdgpu_device *adev);
+int amdgpu_atombios_init(struct amdgpu_device *adev);
+int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 4bdda56fccee..c7d32fb216e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -20,33 +20,69 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"
#include "atombios.h"
+#include "soc15_hw_ip.h"
-#define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))
+union firmware_info {
+ struct atom_firmware_info_v3_1 v31;
+ struct atom_firmware_info_v3_2 v32;
+ struct atom_firmware_info_v3_3 v33;
+ struct atom_firmware_info_v3_4 v34;
+ struct atom_firmware_info_v3_5 v35;
+};
-bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
+/*
+ * Helper function to query firmware capability
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return the firmware_capability field from the firmwareinfo table on success, or 0 if not available
+ */
+uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev)
{
- int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
- firmwareinfo);
- uint16_t data_offset;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ u16 data_offset, size;
+ union firmware_info *firmware_info;
+ u8 frev, crev;
+ u32 fw_cap = 0;
- if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
- NULL, NULL, &data_offset)) {
- struct atom_firmware_info_v3_1 *firmware_info =
- (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
- data_offset);
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
- if (le32_to_cpu(firmware_info->firmware_capability) &
- ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION)
- return true;
+ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
+ index, &size, &frev, &crev, &data_offset)) {
+ /* support firmware_info 3.1 + */
+ if ((frev == 3 && crev >= 1) || (frev > 3)) {
+ firmware_info = (union firmware_info *)
+ (mode_info->atom_context->bios + data_offset);
+ fw_cap = le32_to_cpu(firmware_info->v31.firmware_capability);
+ }
}
- return false;
+
+ return fw_cap;
+}
+
+/*
+ * Helper function to query gpu virtualization capability
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return true if gpu virtualization is supported or false if not
+ */
+bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev)
+{
+ u32 fw_cap;
+
+ fw_cap = adev->mode_info.firmware_flags;
+
+ return (fw_cap & ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION) ? true : false;
}
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
@@ -66,39 +102,72 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
}
}
-void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev)
+static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev,
+ struct vram_usagebyfirmware_v2_1 *fw_usage, int *usage_bytes)
{
- int i;
+ u32 start_addr, fw_size, drv_size;
+
+ start_addr = le32_to_cpu(fw_usage->start_address_in_kb);
+ fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
+ drv_size = le16_to_cpu(fw_usage->used_by_driver_in_kb);
+
+ DRM_DEBUG("atom firmware v2_1 requested %08x %dkb fw %dkb drv\n",
+ start_addr,
+ fw_size,
+ drv_size);
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- adev->bios_scratch[i] = RREG32(adev->bios_scratch_reg_offset + i);
+ if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
+ (u32)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
+ ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
+ /* Firmware requests VRAM reservation for SR-IOV */
+ adev->mman.fw_vram_usage_start_offset = (start_addr &
+ (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+ adev->mman.fw_vram_usage_size = fw_size << 10;
+ /* Use the default scratch size */
+ *usage_bytes = 0;
+ } else {
+ *usage_bytes = drv_size << 10;
+ }
+ return 0;
}
-void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev)
+static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev,
+ struct vram_usagebyfirmware_v2_2 *fw_usage, int *usage_bytes)
{
- int i;
+ u32 fw_start_addr, fw_size, drv_start_addr, drv_size;
- /*
- * VBIOS will check ASIC_INIT_COMPLETE bit to decide if
- * execute ASIC_Init posting via driver
- */
- adev->bios_scratch[7] &= ~ATOM_S7_ASIC_INIT_COMPLETE_MASK;
+ fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb);
+ fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb);
- for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
- WREG32(adev->bios_scratch_reg_offset + i, adev->bios_scratch[i]);
-}
+ drv_start_addr = le32_to_cpu(fw_usage->driver_region0_start_address_in_kb);
+ drv_size = le32_to_cpu(fw_usage->used_by_driver_region0_in_kb);
-void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev,
- bool hung)
-{
- u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3);
+ DRM_DEBUG("atom requested fw start at %08x %dkb and drv start at %08x %dkb\n",
+ fw_start_addr,
+ fw_size,
+ drv_start_addr,
+ drv_size);
+
+ if (amdgpu_sriov_vf(adev) &&
+ ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
+ ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
+ /* Firmware requests VRAM reservation for SR-IOV */
+ adev->mman.fw_vram_usage_start_offset = (fw_start_addr &
+ (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+ adev->mman.fw_vram_usage_size = fw_size << 10;
+ }
- if (hung)
- tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
- else
- tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;
+ if (amdgpu_sriov_vf(adev) &&
+ ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION <<
+ ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0)) {
+ /* driver requests VRAM reservation for SR-IOV */
+ adev->mman.drv_vram_usage_start_offset = (drv_start_addr &
+ (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
+ adev->mman.drv_vram_usage_size = drv_size << 10;
+ }
- WREG32(adev->bios_scratch_reg_offset + 3, tmp);
+ *usage_bytes = 0;
+ return 0;
}
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
@@ -106,20 +175,28 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
struct atom_context *ctx = adev->mode_info.atom_context;
int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
vram_usagebyfirmware);
- uint16_t data_offset;
+ struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1;
+ struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2;
+ u16 data_offset;
+ u8 frev, crev;
int usage_bytes = 0;
- if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
- struct vram_usagebyfirmware_v2_1 *firmware_usage =
- (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
-
- DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
- le32_to_cpu(firmware_usage->start_address_in_kb),
- le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
- le16_to_cpu(firmware_usage->used_by_driver_in_kb));
-
- usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) * 1024;
+ if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) {
+ if (frev == 2 && crev == 1) {
+ fw_usage_v2_1 =
+ (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
+ amdgpu_atomfirmware_allocate_fb_v2_1(adev,
+ fw_usage_v2_1,
+ &usage_bytes);
+ } else if (frev >= 2 && crev >= 2) {
+ fw_usage_v2_2 =
+ (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset);
+ amdgpu_atomfirmware_allocate_fb_v2_2(adev,
+ fw_usage_v2_2,
+ &usage_bytes);
+ }
}
+
ctx->scratch_size_bytes = 0;
if (usage_bytes == 0)
usage_bytes = 20 * 1024;
@@ -130,3 +207,802 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
ctx->scratch_size_bytes = usage_bytes;
return 0;
}
+
+union igp_info {
+ struct atom_integrated_system_info_v1_11 v11;
+ struct atom_integrated_system_info_v1_12 v12;
+ struct atom_integrated_system_info_v2_1 v21;
+ struct atom_integrated_system_info_v2_3 v23;
+};
+
+union umc_info {
+ struct atom_umc_info_v3_1 v31;
+ struct atom_umc_info_v3_2 v32;
+ struct atom_umc_info_v3_3 v33;
+ struct atom_umc_info_v4_0 v40;
+};
+
+union vram_info {
+ struct atom_vram_info_header_v2_3 v23;
+ struct atom_vram_info_header_v2_4 v24;
+ struct atom_vram_info_header_v2_5 v25;
+ struct atom_vram_info_header_v2_6 v26;
+ struct atom_vram_info_header_v3_0 v30;
+};
+
+union vram_module {
+ struct atom_vram_module_v9 v9;
+ struct atom_vram_module_v10 v10;
+ struct atom_vram_module_v11 v11;
+ struct atom_vram_module_v3_0 v30;
+};
+
+static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
+ int atom_mem_type)
+{
+ int vram_type;
+
+ if (adev->flags & AMD_IS_APU) {
+ switch (atom_mem_type) {
+ case Ddr2MemType:
+ case LpDdr2MemType:
+ vram_type = AMDGPU_VRAM_TYPE_DDR2;
+ break;
+ case Ddr3MemType:
+ case LpDdr3MemType:
+ vram_type = AMDGPU_VRAM_TYPE_DDR3;
+ break;
+ case Ddr4MemType:
+ vram_type = AMDGPU_VRAM_TYPE_DDR4;
+ break;
+ case LpDdr4MemType:
+ vram_type = AMDGPU_VRAM_TYPE_LPDDR4;
+ break;
+ case Ddr5MemType:
+ vram_type = AMDGPU_VRAM_TYPE_DDR5;
+ break;
+ case LpDdr5MemType:
+ vram_type = AMDGPU_VRAM_TYPE_LPDDR5;
+ break;
+ default:
+ vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ break;
+ }
+ } else {
+ switch (atom_mem_type) {
+ case ATOM_DGPU_VRAM_TYPE_GDDR5:
+ vram_type = AMDGPU_VRAM_TYPE_GDDR5;
+ break;
+ case ATOM_DGPU_VRAM_TYPE_HBM2:
+ case ATOM_DGPU_VRAM_TYPE_HBM2E:
+ case ATOM_DGPU_VRAM_TYPE_HBM3:
+ vram_type = AMDGPU_VRAM_TYPE_HBM;
+ break;
+ case ATOM_DGPU_VRAM_TYPE_GDDR6:
+ vram_type = AMDGPU_VRAM_TYPE_GDDR6;
+ break;
+ case ATOM_DGPU_VRAM_TYPE_HBM3E:
+ vram_type = AMDGPU_VRAM_TYPE_HBM3E;
+ break;
+ default:
+ vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ break;
+ }
+ }
+
+ return vram_type;
+}
+
+int
+amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type,
+ int *vram_vendor)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index, i = 0;
+ u16 data_offset, size;
+ union igp_info *igp_info;
+ union vram_info *vram_info;
+ union umc_info *umc_info;
+ union vram_module *vram_module;
+ u8 frev, crev;
+ u8 mem_type;
+ u8 mem_vendor;
+ u32 mem_channel_number;
+ u32 mem_channel_width;
+ u32 module_id;
+
+ if (adev->flags & AMD_IS_APU)
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ integratedsysteminfo);
+ else {
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, umc_info);
+ break;
+ default:
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info);
+ }
+ }
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+ index, &size,
+ &frev, &crev, &data_offset)) {
+ if (adev->flags & AMD_IS_APU) {
+ igp_info = (union igp_info *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 11:
+ case 12:
+ mem_channel_number = igp_info->v11.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ mem_type = igp_info->v11.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number * mem_channel_width;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case 2:
+ switch (crev) {
+ case 1:
+ case 2:
+ mem_channel_number = igp_info->v21.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ mem_type = igp_info->v21.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number * mem_channel_width;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ case 3:
+ mem_channel_number = igp_info->v23.umachannelnumber;
+ if (!mem_channel_number)
+ mem_channel_number = 1;
+ mem_type = igp_info->v23.memorytype;
+ if (mem_type == LpDdr5MemType)
+ mem_channel_width = 32;
+ else
+ mem_channel_width = 64;
+ if (vram_width)
+ *vram_width = mem_channel_number * mem_channel_width;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
+
+ if (frev == 4) {
+ switch (crev) {
+ case 0:
+ mem_channel_number = le32_to_cpu(umc_info->v40.channel_num);
+ mem_type = le32_to_cpu(umc_info->v40.vram_type);
+ mem_channel_width = le32_to_cpu(umc_info->v40.channel_width);
+ mem_vendor = RREG32(adev->bios_scratch_reg_offset + 4) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else
+ return -EINVAL;
+ break;
+ default:
+ vram_info = (union vram_info *)
+ (mode_info->atom_context->bios + data_offset);
+
+ module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16;
+ if (frev == 3) {
+ switch (crev) {
+ /* v30 */
+ case 0:
+ vram_module = (union vram_module *)vram_info->v30.vram_module;
+ mem_vendor = (vram_module->v30.dram_vendor_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ mem_type = vram_info->v30.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_info->v30.channel_num;
+ mem_channel_width = vram_info->v30.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (frev == 2) {
+ switch (crev) {
+ /* v23 */
+ case 3:
+ if (module_id > vram_info->v23.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v23.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v9.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v9.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v9.channel_num;
+ mem_channel_width = vram_module->v9.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ /* v24 */
+ case 4:
+ if (module_id > vram_info->v24.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v24.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v10.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v10.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v10.channel_num;
+ mem_channel_width = vram_module->v10.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v10.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ /* v25 */
+ case 5:
+ if (module_id > vram_info->v25.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v25.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v11.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v11.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v11.channel_num;
+ mem_channel_width = vram_module->v11.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v11.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ /* v26 */
+ case 6:
+ if (module_id > vram_info->v26.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v26.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v9.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v9.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v9.channel_num;
+ mem_channel_width = vram_module->v9.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ /* invalid frev */
+ return -EINVAL;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Return true if the vbios enabled ecc by default and the umc info table is
+ * available, or false if ecc is not enabled or the umc info table is not available
+ */
+bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ u16 data_offset, size;
+ union umc_info *umc_info;
+ u8 frev, crev;
+ bool mem_ecc_enabled = false;
+ u8 umc_config;
+ u32 umc_config1;
+ adev->ras_default_ecc_enabled = false;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ umc_info);
+
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+ index, &size, &frev, &crev, &data_offset)) {
+ umc_info = (union umc_info *)(mode_info->atom_context->bios + data_offset);
+ if (frev == 3) {
+ switch (crev) {
+ case 1:
+ umc_config = le32_to_cpu(umc_info->v31.umc_config);
+ mem_ecc_enabled =
+ (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
+ break;
+ case 2:
+ umc_config = le32_to_cpu(umc_info->v32.umc_config);
+ mem_ecc_enabled =
+ (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
+ break;
+ case 3:
+ umc_config = le32_to_cpu(umc_info->v33.umc_config);
+ umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
+ mem_ecc_enabled =
+ ((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
+ (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
+ adev->ras_default_ecc_enabled =
+ (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
+ break;
+ default:
+ /* unsupported crev */
+ return false;
+ }
+ } else if (frev == 4) {
+ switch (crev) {
+ case 0:
+ umc_config = le32_to_cpu(umc_info->v40.umc_config);
+ umc_config1 = le32_to_cpu(umc_info->v40.umc_config1);
+ mem_ecc_enabled =
+ (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE) ? true : false;
+ adev->ras_default_ecc_enabled =
+ (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
+ break;
+ default:
+ /* unsupported crev */
+ return false;
+ }
+ } else {
+ /* unsupported frev */
+ return false;
+ }
+ }
+
+ return mem_ecc_enabled;
+}
+
+/*
+ * Helper function to query sram ecc capability
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return true if vbios supports sram ecc or false if not
+ */
+bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
+{
+ u32 fw_cap;
+
+ fw_cap = adev->mode_info.firmware_flags;
+
+ return (fw_cap & ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
+}
+
+/*
+ * Helper function to query dynamic boot config capability
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return true if vbios supports dynamic boot config or false if not
+ */
+bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev)
+{
+ u32 fw_cap;
+
+ fw_cap = adev->mode_info.firmware_flags;
+
+ return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
+}
+
+/**
+ * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
+ * @adev: amdgpu_device pointer
+ * @i2c_address: pointer to u8; if not NULL, will contain
+ * the RAS EEPROM address if the function returns true
+ *
+ * Return true if VBIOS supports RAS EEPROM address reporting,
+ * else return false. If true and @i2c_address is not NULL,
+ * will contain the RAS ROM address.
+ */
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
+ u8 *i2c_address)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ u16 data_offset, size;
+ union firmware_info *firmware_info;
+ u8 frev, crev;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
+ index, &size, &frev, &crev,
+ &data_offset)) {
+ /* support firmware_info 3.4 + */
+ if ((frev == 3 && crev >= 4) || (frev > 3)) {
+ firmware_info = (union firmware_info *)
+ (mode_info->atom_context->bios + data_offset);
+ /* The ras_rom_i2c_slave_addr should ideally
+ * be a 19-bit EEPROM address, which would be
+ * used as is by the driver; see top of
+ * amdgpu_eeprom.c.
+ *
+ * When this is the case, 0 is of course a
+ * valid RAS EEPROM address, in which case,
+ * we'll drop the first "if (firm...)" and only
+ * leave the check for the pointer.
+ *
+ * The reason this works right now is because
+ * ras_rom_i2c_slave_addr contains the EEPROM
+ * device type qualifier 1010b in the top 4
+ * bits.
+ */
+ if (firmware_info->v34.ras_rom_i2c_slave_addr) {
+ if (i2c_address)
+ *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
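
A hedged usage sketch for the helper above, not part of the patch: query the RAS EEPROM address and fall back to a caller-chosen default when the vbios does not report one. The 0xA0 fallback is purely an assumption for the example.

/* Sketch only: prefer the vbios-reported RAS EEPROM address when present. */
static u8 example_ras_eeprom_addr(struct amdgpu_device *adev)
{
	u8 addr;

	if (amdgpu_atomfirmware_ras_rom_addr(adev, &addr))
		return addr;

	return 0xA0;	/* assumed default device address, example only */
}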
+
+
+union smu_info {
+ struct atom_smu_info_v3_1 v31;
+ struct atom_smu_info_v4_0 v40;
+};
+
+union gfx_info {
+ struct atom_gfx_info_v2_2 v22;
+ struct atom_gfx_info_v2_4 v24;
+ struct atom_gfx_info_v2_7 v27;
+ struct atom_gfx_info_v3_0 v30;
+};
+
+int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct amdgpu_pll *spll = &adev->clock.spll;
+ struct amdgpu_pll *mpll = &adev->clock.mpll;
+ uint8_t frev, crev;
+ uint16_t data_offset;
+ int ret = -EINVAL, index;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ union firmware_info *firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ adev->clock.default_sclk =
+ le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
+ adev->clock.default_mclk =
+ le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
+
+ adev->pm.current_sclk = adev->clock.default_sclk;
+ adev->pm.current_mclk = adev->clock.default_mclk;
+
+ ret = 0;
+ }
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ smu_info);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ union smu_info *smu_info =
+ (union smu_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ /* system clock */
+ if (frev == 3)
+ spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);
+ else if (frev == 4)
+ spll->reference_freq = le32_to_cpu(smu_info->v40.core_refclk_10khz);
+
+ spll->reference_div = 0;
+ spll->min_post_div = 1;
+ spll->max_post_div = 1;
+ spll->min_ref_div = 2;
+ spll->max_ref_div = 0xff;
+ spll->min_feedback_div = 4;
+ spll->max_feedback_div = 0xff;
+ spll->best_vco = 0;
+
+ ret = 0;
+ }
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ umc_info);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ union umc_info *umc_info =
+ (union umc_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ /* memory clock */
+ mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);
+
+ mpll->reference_div = 0;
+ mpll->min_post_div = 1;
+ mpll->max_post_div = 1;
+ mpll->min_ref_div = 2;
+ mpll->max_ref_div = 0xff;
+ mpll->min_feedback_div = 4;
+ mpll->max_feedback_div = 0xff;
+ mpll->best_vco = 0;
+
+ ret = 0;
+ }
+
+ /* if the asic is Navi+, the rlc reference clock from the vbios gfx_info
+ * table is used for the system clock */
+ if (adev->asic_type >= CHIP_NAVI10) {
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ gfx_info);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ union gfx_info *gfx_info = (union gfx_info *)
+ (mode_info->atom_context->bios + data_offset);
+ if ((frev == 3) ||
+ (frev == 2 && crev == 6)) {
+ spll->reference_freq = le32_to_cpu(gfx_info->v30.golden_tsc_count_lower_refclk);
+ ret = 0;
+ } else if ((frev == 2) &&
+ (crev >= 2) &&
+ (crev != 6)) {
+ spll->reference_freq = le32_to_cpu(gfx_info->v22.rlc_gpu_timer_refclk);
+ ret = 0;
+ } else {
+ BUG();
+ }
+ }
+ }
+
+ return ret;
+}
+
+int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ uint8_t frev, crev;
+ uint16_t data_offset;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ gfx_info);
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ union gfx_info *gfx_info = (union gfx_info *)
+ (mode_info->atom_context->bios + data_offset);
+ if (frev == 2) {
+ switch (crev) {
+ case 4:
+ adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
+ adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
+ adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
+ adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
+ adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
+ adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
+ adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
+ adev->gfx.config.gs_prim_buffer_depth =
+ le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf =
+ gfx_info->v24.gc_double_offchip_lds_buffer;
+ adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
+ adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
+ return 0;
+ case 7:
+ adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
+ adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
+ adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
+ adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
+ adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
+ adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
+ adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
+ adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
+ adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
+ adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ } else if (frev == 3) {
+ switch (crev) {
+ case 0:
+ adev->gfx.config.max_shader_engines = gfx_info->v30.max_shader_engines;
+ adev->gfx.config.max_cu_per_sh = gfx_info->v30.max_cu_per_sh;
+ adev->gfx.config.max_sh_per_se = gfx_info->v30.max_sh_per_se;
+ adev->gfx.config.max_backends_per_se = gfx_info->v30.max_backends_per_se;
+ adev->gfx.config.max_texture_channel_caches = gfx_info->v30.max_texture_channel_caches;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ }
+ return -EINVAL;
+}
+
+/*
+ * Helper function to query two stage mem training capability
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return true if two stage mem training is supported or false if not
+ */
+bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev)
+{
+ u32 fw_cap;
+
+ fw_cap = adev->mode_info.firmware_flags;
+
+ return (fw_cap & ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) ? true : false;
+}
+
+int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev)
+{
+ struct atom_context *ctx = adev->mode_info.atom_context;
+ union firmware_info *firmware_info;
+ int index;
+ u16 data_offset, size;
+ u8 frev, crev;
+ int fw_reserved_fb_size;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ if (!amdgpu_atom_parse_data_header(ctx, index, &size,
+ &frev, &crev, &data_offset))
+ /* fail to parse data_header */
+ return 0;
+
+ firmware_info = (union firmware_info *)(ctx->bios + data_offset);
+
+ if (frev != 3)
+ return -EINVAL;
+
+ switch (crev) {
+ case 4:
+ fw_reserved_fb_size =
+ (firmware_info->v34.fw_reserved_size_in_kb << 10);
+ break;
+ case 5:
+ fw_reserved_fb_size =
+ (firmware_info->v35.fw_reserved_size_in_kb << 10);
+ break;
+ default:
+ fw_reserved_fb_size = 0;
+ break;
+ }
+
+ return fw_reserved_fb_size;
+}
+
+/*
+ * Helper function to execute asic_init table
+ *
+ * @adev: amdgpu_device pointer
+ * @fb_reset: flag to indicate whether fb is reset or not
+ *
+ * Return 0 on success, otherwise a negative error code
+ */
+int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct atom_context *ctx;
+ uint8_t frev, crev;
+ uint16_t data_offset;
+ uint32_t bootup_sclk_in10khz, bootup_mclk_in10khz;
+ struct asic_init_ps_allocation_v2_1 asic_init_ps_v2_1;
+ int index;
+
+ if (!mode_info)
+ return -EINVAL;
+
+ ctx = mode_info->atom_context;
+ if (!ctx)
+ return -EINVAL;
+
+ /* query bootup sclk/mclk from firmware_info table */
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+ if (amdgpu_atom_parse_data_header(ctx, index, NULL,
+ &frev, &crev, &data_offset)) {
+ union firmware_info *firmware_info =
+ (union firmware_info *)(ctx->bios +
+ data_offset);
+
+ bootup_sclk_in10khz =
+ le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
+ bootup_mclk_in10khz =
+ le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);
+ } else {
+ return -EINVAL;
+ }
+
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ asic_init);
+ if (amdgpu_atom_parse_cmd_header(mode_info->atom_context, index, &frev, &crev)) {
+ if (frev == 2 && crev >= 1) {
+ memset(&asic_init_ps_v2_1, 0, sizeof(asic_init_ps_v2_1));
+ asic_init_ps_v2_1.param.engineparam.sclkfreqin10khz = bootup_sclk_in10khz;
+ asic_init_ps_v2_1.param.memparam.mclkfreqin10khz = bootup_mclk_in10khz;
+ asic_init_ps_v2_1.param.engineparam.engineflag = b3NORMAL_ENGINE_INIT;
+ if (!fb_reset)
+ asic_init_ps_v2_1.param.memparam.memflag = b3DRAM_SELF_REFRESH_EXIT;
+ else
+ asic_init_ps_v2_1.param.memparam.memflag = 0;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, (uint32_t *)&asic_init_ps_v2_1,
+ sizeof(asic_init_ps_v2_1));
+}
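The atomfirmware helpers above all follow one pattern: look up the table's index in the master data-table list, parse the header to obtain the format revision (frev) and content revision (crev), then cast the bytes at data_offset to the structure layout that matches that revision, returning -EINVAL for anything unknown. Below is a minimal standalone sketch of that revision dispatch; the structure names are made up for illustration, not the real ATOM table layouts.

/* Standalone sketch of the frev/crev dispatch used above; the structures are
 * illustrative stand-ins, not the real atomfirmware table layouts. */
#include <stdint.h>
#include <stdio.h>

struct gfx_info_v2_4 { uint8_t max_shader_engines; };
struct gfx_info_v3_0 { uint8_t max_shader_engines; };

static int parse_gfx_info(uint8_t frev, uint8_t crev, const void *table,
			  unsigned int *max_se)
{
	if (frev == 2 && crev == 4) {
		*max_se = ((const struct gfx_info_v2_4 *)table)->max_shader_engines;
		return 0;
	}
	if (frev == 3 && crev == 0) {
		*max_se = ((const struct gfx_info_v3_0 *)table)->max_shader_engines;
		return 0;
	}
	return -1;		/* unknown revision, like the -EINVAL paths above */
}

int main(void)
{
	struct gfx_info_v2_4 table = { .max_shader_engines = 4 };
	unsigned int max_se;

	if (!parse_gfx_info(2, 4, &table, &max_se))
		printf("max_shader_engines = %u\n", max_se);
	return 0;
}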
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index a2c3ebe22c71..649b5530d8ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -24,12 +24,22 @@
#ifndef __AMDGPU_ATOMFIRMWARE_H__
#define __AMDGPU_ATOMFIRMWARE_H__
-bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
+#define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))
+
+uint32_t amdgpu_atomfirmware_query_firmware_capability(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_gpu_virtualization_supported(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
-void amdgpu_atomfirmware_scratch_regs_save(struct amdgpu_device *adev);
-void amdgpu_atomfirmware_scratch_regs_restore(struct amdgpu_device *adev);
-void amdgpu_atomfirmware_scratch_regs_engine_hung(struct amdgpu_device *adev,
- bool hung);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
+ int *vram_width, int *vram_type, int *vram_vendor);
+int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t *i2c_address);
+bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev);
+int amdgpu_atomfirmware_asic_init(struct amdgpu_device *adev, bool fb_reset);
#endif
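The get_index_into_master_table() macro added here works because each data table is referenced by a 16-bit slot in the master list, so the byte offset of the named member divided by sizeof(uint16_t) is that table's index. A small standalone illustration follows, with an invented master-list layout rather than the real firmware header.

/* Standalone illustration of the offsetof-based index macro; the master-list
 * layout below is made up for the example. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct master_list {
	uint16_t utilitypipeline;
	uint16_t multimedia_info;
	uint16_t firmwareinfo;
	uint16_t gfx_info;
};

#define get_index_into_master_table(master_table, table_name) \
	(offsetof(struct master_table, table_name) / sizeof(uint16_t))

int main(void)
{
	/* each member is one 16-bit slot, so byte offset / 2 is the slot index */
	printf("firmwareinfo index = %zu\n",
	       get_index_into_master_table(master_list, firmwareinfo));
	printf("gfx_info index     = %zu\n",
	       get_index_into_master_table(master_list, gfx_info));
	return 0;
}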
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index c13c51af0b68..3893e6fc2f03 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -1,9 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2010 Red Hat Inc.
* Author : Dave Airlie <airlied@redhat.com>
*
- * Licensed under GPLv2
- *
* ATPX support for both Intel/ATI
*/
#include <linux/vga_switcheroo.h>
@@ -12,8 +11,19 @@
#include <linux/pci.h>
#include <linux/delay.h>
+#include "amdgpu.h"
#include "amd_acpi.h"
+#define AMDGPU_PX_QUIRK_FORCE_ATPX (1 << 0)
+
+struct amdgpu_px_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 px_quirk_flags;
+};
+
struct amdgpu_atpx_functions {
bool px_params;
bool power_cntl;
@@ -22,7 +32,7 @@ struct amdgpu_atpx_functions {
bool switch_start;
bool switch_end;
bool disp_connectors_mapping;
- bool disp_detetion_ports;
+ bool disp_detection_ports;
};
struct amdgpu_atpx {
@@ -35,6 +45,7 @@ struct amdgpu_atpx {
static struct amdgpu_atpx_priv {
bool atpx_detected;
bool bridge_pm_usable;
+ unsigned int quirks;
/* handle for device - and atpx */
acpi_handle dhandle;
acpi_handle other_handle;
@@ -63,22 +74,21 @@ struct atpx_mux {
u16 mux;
} __packed;
-bool amdgpu_has_atpx(void) {
+bool amdgpu_has_atpx(void)
+{
return amdgpu_atpx_priv.atpx_detected;
}
-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+bool amdgpu_has_atpx_dgpu_power_cntl(void)
+{
return amdgpu_atpx_priv.atpx.functions.power_cntl;
}
-bool amdgpu_is_atpx_hybrid(void) {
+bool amdgpu_is_atpx_hybrid(void)
+{
return amdgpu_atpx_priv.atpx.is_hybrid;
}
-bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
- return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
-}
-
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -117,7 +127,7 @@ static union acpi_object *amdgpu_atpx_call(acpi_handle handle, int function,
/* Fail only if calling the method fails and ATPX is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- printk("failed to evaluate ATPX got %s\n",
+ pr_err("failed to evaluate ATPX got %s\n",
acpi_format_exception(status));
kfree(buffer.pointer);
return NULL;
@@ -145,11 +155,11 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
f->switch_start = mask & ATPX_GRAPHICS_DEVICE_SWITCH_START_NOTIFICATION_SUPPORTED;
f->switch_end = mask & ATPX_GRAPHICS_DEVICE_SWITCH_END_NOTIFICATION_SUPPORTED;
f->disp_connectors_mapping = mask & ATPX_GET_DISPLAY_CONNECTORS_MAPPING_SUPPORTED;
- f->disp_detetion_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
+ f->disp_detection_ports = mask & ATPX_GET_DISPLAY_DETECTION_PORTS_SUPPORTED;
}
/**
- * amdgpu_atpx_validate_functions - validate ATPX functions
+ * amdgpu_atpx_validate - validate ATPX functions
*
* @atpx: amdgpu atpx struct
*
@@ -173,7 +183,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
size = *(u16 *) info->buffer.pointer;
if (size < 10) {
- printk("ATPX buffer is too small: %zu\n", size);
+ pr_err("ATPX buffer is too small: %zu\n", size);
kfree(info);
return -EINVAL;
}
@@ -205,13 +215,19 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
- printk("ATPX Hybrid Graphics\n");
- /*
- * Disable legacy PM methods only when pcie port PM is usable,
- * otherwise the device might fail to power off or power on.
- */
- atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
- atpx->is_hybrid = true;
+ if (amdgpu_atpx_priv.quirks & AMDGPU_PX_QUIRK_FORCE_ATPX) {
+ pr_warn("ATPX Hybrid Graphics, forcing to ATPX\n");
+ atpx->functions.power_cntl = true;
+ atpx->is_hybrid = false;
+ } else {
+ pr_notice("ATPX Hybrid Graphics\n");
+ /*
+ * Disable legacy PM methods only when pcie port PM is usable,
+ * otherwise the device might fail to power off or power on.
+ */
+ atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
+ atpx->is_hybrid = true;
+ }
}
atpx->dgpu_req_power_for_displays = false;
@@ -246,7 +262,7 @@ static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx)
size = *(u16 *) info->buffer.pointer;
if (size < 8) {
- printk("ATPX buffer is too small: %zu\n", size);
+ pr_err("ATPX buffer is too small: %zu\n", size);
err = -EINVAL;
goto out;
}
@@ -255,8 +271,8 @@ static int amdgpu_atpx_verify_interface(struct amdgpu_atpx *atpx)
memcpy(&output, info->buffer.pointer, size);
/* TODO: check version? */
- printk("ATPX version %u, functions 0x%08x\n",
- output.version, output.function_bits);
+ pr_notice("ATPX version %u, functions 0x%08x\n",
+ output.version, output.function_bits);
amdgpu_atpx_parse_functions(&atpx->functions, output.function_bits);
@@ -533,7 +549,7 @@ static int amdgpu_atpx_init(void)
* look up whether we are the integrated or discrete GPU (all asics).
* Returns the client id.
*/
-static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
+static enum vga_switcheroo_client_id amdgpu_atpx_get_client_id(struct pci_dev *pdev)
{
if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev))
return VGA_SWITCHEROO_IGD;
@@ -547,6 +563,34 @@ static const struct vga_switcheroo_handler amdgpu_atpx_handler = {
.get_client_id = amdgpu_atpx_get_client_id,
};
+static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
+ /* HG _PR3 doesn't seem to work on this A+A weston board */
+ { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x699f, 0x1028, 0x0814, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x17AA, 0x3806, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0, 0, 0, 0, 0 },
+};
+
+static void amdgpu_atpx_get_quirks(struct pci_dev *pdev)
+{
+ const struct amdgpu_px_quirk *p = amdgpu_px_quirk_list;
+
+ /* Apply PX quirks */
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device) {
+ amdgpu_atpx_priv.quirks |= p->px_quirk_flags;
+ break;
+ }
+ ++p;
+ }
+}
+
/**
* amdgpu_atpx_detect - detect whether we have PX
*
@@ -566,19 +610,21 @@ static bool amdgpu_atpx_detect(void)
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
vga_count++;
- has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+ has_atpx |= amdgpu_atpx_pci_probe_handle(pdev);
parent_pdev = pci_upstream_bridge(pdev);
d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ amdgpu_atpx_get_quirks(pdev);
}
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
vga_count++;
- has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+ has_atpx |= amdgpu_atpx_pci_probe_handle(pdev);
parent_pdev = pci_upstream_bridge(pdev);
d3_supported |= parent_pdev && parent_pdev->bridge_d3;
+ amdgpu_atpx_get_quirks(pdev);
}
if (has_atpx && vga_count == 2) {
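The quirk handling added to this file is a plain table walk: amdgpu_atpx_get_quirks() scans a sentinel-terminated list and ORs in the flags of the first entry whose vendor, device and subsystem IDs all match the probed PCI device. A standalone sketch of that lookup (the IDs below are only examples):

/* Standalone sketch of the quirk-table lookup; IDs and flag names are
 * examples, not an authoritative quirk list. */
#include <stdint.h>
#include <stdio.h>

#define PX_QUIRK_FORCE_ATPX (1u << 0)

struct px_quirk {
	uint32_t chip_vendor, chip_device;
	uint32_t subsys_vendor, subsys_device;
	uint32_t flags;
};

static const struct px_quirk quirk_list[] = {
	{ 0x1002, 0x6900, 0x1002, 0x0124, PX_QUIRK_FORCE_ATPX },
	{ 0, 0, 0, 0, 0 },	/* sentinel terminates the walk */
};

static uint32_t lookup_quirks(uint32_t vendor, uint32_t device,
			      uint32_t sub_vendor, uint32_t sub_device)
{
	const struct px_quirk *p;
	uint32_t quirks = 0;

	for (p = quirk_list; p->chip_device != 0; ++p) {
		if (vendor == p->chip_vendor && device == p->chip_device &&
		    sub_vendor == p->subsys_vendor &&
		    sub_device == p->subsys_device) {
			quirks |= p->flags;
			break;
		}
	}
	return quirks;
}

int main(void)
{
	printf("quirks = 0x%x\n",
	       (unsigned int)lookup_quirks(0x1002, 0x6900, 0x1002, 0x0124));
	return 0;
}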
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 1beae5b930d0..199693369c7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -21,7 +21,7 @@
*
* Authors: Jerome Glisse
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -29,120 +29,99 @@
#define AMDGPU_BENCHMARK_COMMON_MODES_N 17
static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
- uint64_t saddr, uint64_t daddr, int n)
+ uint64_t saddr, uint64_t daddr, int n, s64 *time_ms)
{
- unsigned long start_jiffies;
- unsigned long end_jiffies;
- struct dma_fence *fence = NULL;
+ ktime_t stime, etime;
+ struct dma_fence *fence;
int i, r;
- start_jiffies = jiffies;
+ stime = ktime_get();
for (i = 0; i < n; i++) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
- false);
+ false, false, 0);
if (r)
goto exit_do_move;
r = dma_fence_wait(fence, false);
+ dma_fence_put(fence);
if (r)
goto exit_do_move;
- dma_fence_put(fence);
}
- end_jiffies = jiffies;
- r = jiffies_to_msecs(end_jiffies - start_jiffies);
exit_do_move:
- if (fence)
- dma_fence_put(fence);
+ etime = ktime_get();
+ *time_ms = ktime_ms_delta(etime, stime);
+
return r;
}
-static void amdgpu_benchmark_log_results(int n, unsigned size,
- unsigned int time,
+static void amdgpu_benchmark_log_results(struct amdgpu_device *adev,
+ int n, unsigned size,
+ s64 time_ms,
unsigned sdomain, unsigned ddomain,
char *kind)
{
- unsigned int throughput = (n * (size >> 10)) / time;
- DRM_INFO("amdgpu: %s %u bo moves of %u kB from"
- " %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
- kind, n, size >> 10, sdomain, ddomain, time,
+ s64 throughput = (n * (size >> 10));
+
+ throughput = div64_s64(throughput, time_ms);
+
+ dev_info(adev->dev, "amdgpu: %s %u bo moves of %u kB from"
+ " %d to %d in %lld ms, throughput: %lld Mb/s or %lld MB/s\n",
+ kind, n, size >> 10, sdomain, ddomain, time_ms,
throughput * 8, throughput);
}
-static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
- unsigned sdomain, unsigned ddomain)
+static int amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
+ unsigned sdomain, unsigned ddomain)
{
struct amdgpu_bo *dobj = NULL;
struct amdgpu_bo *sobj = NULL;
uint64_t saddr, daddr;
+ s64 time_ms;
int r, n;
- int time;
n = AMDGPU_BENCHMARK_ITERATIONS;
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
- NULL, &sobj);
- if (r) {
- goto out_cleanup;
- }
- r = amdgpu_bo_reserve(sobj, false);
- if (unlikely(r != 0))
- goto out_cleanup;
- r = amdgpu_bo_pin(sobj, sdomain, &saddr);
- amdgpu_bo_unreserve(sobj);
- if (r) {
- goto out_cleanup;
- }
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
- NULL, &dobj);
- if (r) {
- goto out_cleanup;
- }
- r = amdgpu_bo_reserve(dobj, false);
- if (unlikely(r != 0))
+
+ r = amdgpu_bo_create_kernel(adev, size,
+ PAGE_SIZE, sdomain,
+ &sobj,
+ &saddr,
+ NULL);
+ if (r)
goto out_cleanup;
- r = amdgpu_bo_pin(dobj, ddomain, &daddr);
- amdgpu_bo_unreserve(dobj);
- if (r) {
+ r = amdgpu_bo_create_kernel(adev, size,
+ PAGE_SIZE, ddomain,
+ &dobj,
+ &daddr,
+ NULL);
+ if (r)
goto out_cleanup;
- }
if (adev->mman.buffer_funcs) {
- time = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n);
- if (time < 0)
+ r = amdgpu_benchmark_do_move(adev, size, saddr, daddr, n, &time_ms);
+ if (r)
goto out_cleanup;
- if (time > 0)
- amdgpu_benchmark_log_results(n, size, time,
+ else
+ amdgpu_benchmark_log_results(adev, n, size, time_ms,
sdomain, ddomain, "dma");
}
out_cleanup:
/* Check error value now. The value can be overwritten when clean up.*/
- if (r) {
- DRM_ERROR("Error while benchmarking BO move.\n");
- }
+ if (r < 0)
+ dev_info(adev->dev, "Error while benchmarking BO move.\n");
- if (sobj) {
- r = amdgpu_bo_reserve(sobj, true);
- if (likely(r == 0)) {
- amdgpu_bo_unpin(sobj);
- amdgpu_bo_unreserve(sobj);
- }
- amdgpu_bo_unref(&sobj);
- }
- if (dobj) {
- r = amdgpu_bo_reserve(dobj, true);
- if (likely(r == 0)) {
- amdgpu_bo_unpin(dobj);
- amdgpu_bo_unreserve(dobj);
- }
- amdgpu_bo_unref(&dobj);
- }
+ if (sobj)
+ amdgpu_bo_free_kernel(&sobj, &saddr, NULL);
+ if (dobj)
+ amdgpu_bo_free_kernel(&dobj, &daddr, NULL);
+ return r;
}
-void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
+int amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
{
- int i;
+ int i, r;
static const int common_modes[AMDGPU_BENCHMARK_COMMON_MODES_N] = {
640 * 480 * 4,
720 * 480 * 4,
@@ -163,63 +142,119 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number)
1920 * 1200 * 4
};
+ mutex_lock(&adev->benchmark_mutex);
switch (test_number) {
case 1:
+ dev_info(adev->dev,
+ "benchmark test: %d (simple test, VRAM to GTT and GTT to VRAM)\n",
+ test_number);
/* simple test, VRAM to GTT and GTT to VRAM */
- amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_DOMAIN_VRAM);
- amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_DOMAIN_GTT);
+ r = amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto done;
+ r = amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ goto done;
break;
case 2:
+ dev_info(adev->dev,
+ "benchmark test: %d (simple test, VRAM to VRAM)\n",
+ test_number);
/* simple test, VRAM to VRAM */
- amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_DOMAIN_VRAM);
+ r = amdgpu_benchmark_move(adev, 1024*1024, AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto done;
break;
case 3:
+ dev_info(adev->dev,
+ "benchmark test: %d (GTT to VRAM, buffer size sweep, powers of 2)\n",
+ test_number);
/* GTT to VRAM, buffer size sweep, powers of 2 */
- for (i = 1; i <= 16384; i <<= 1)
- amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_DOMAIN_VRAM);
+ for (i = 1; i <= 16384; i <<= 1) {
+ r = amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto done;
+ }
break;
case 4:
+ dev_info(adev->dev,
+ "benchmark test: %d (VRAM to GTT, buffer size sweep, powers of 2)\n",
+ test_number);
/* VRAM to GTT, buffer size sweep, powers of 2 */
- for (i = 1; i <= 16384; i <<= 1)
- amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_DOMAIN_GTT);
+ for (i = 1; i <= 16384; i <<= 1) {
+ r = amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ goto done;
+ }
break;
case 5:
+ dev_info(adev->dev,
+ "benchmark test: %d (VRAM to VRAM, buffer size sweep, powers of 2)\n",
+ test_number);
/* VRAM to VRAM, buffer size sweep, powers of 2 */
- for (i = 1; i <= 16384; i <<= 1)
- amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_DOMAIN_VRAM);
+ for (i = 1; i <= 16384; i <<= 1) {
+ r = amdgpu_benchmark_move(adev, i * AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto done;
+ }
break;
case 6:
+ dev_info(adev->dev,
+ "benchmark test: %d (GTT to VRAM, buffer size sweep, common modes)\n",
+ test_number);
/* GTT to VRAM, buffer size sweep, common modes */
- for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
- amdgpu_benchmark_move(adev, common_modes[i],
- AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_DOMAIN_VRAM);
+ for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++) {
+ r = amdgpu_benchmark_move(adev, common_modes[i],
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto done;
+ }
break;
case 7:
+ dev_info(adev->dev,
+ "benchmark test: %d (VRAM to GTT, buffer size sweep, common modes)\n",
+ test_number);
/* VRAM to GTT, buffer size sweep, common modes */
- for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
- amdgpu_benchmark_move(adev, common_modes[i],
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_DOMAIN_GTT);
+ for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++) {
+ r = amdgpu_benchmark_move(adev, common_modes[i],
+ AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ goto done;
+ }
break;
case 8:
+ dev_info(adev->dev,
+ "benchmark test: %d (VRAM to VRAM, buffer size sweep, common modes)\n",
+ test_number);
/* VRAM to VRAM, buffer size sweep, common modes */
- for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++)
- amdgpu_benchmark_move(adev, common_modes[i],
+ for (i = 0; i < AMDGPU_BENCHMARK_COMMON_MODES_N; i++) {
+ r = amdgpu_benchmark_move(adev, common_modes[i],
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto done;
+ }
break;
default:
- DRM_ERROR("Unknown benchmark\n");
+ dev_info(adev->dev, "Unknown benchmark %d\n", test_number);
+ r = -EINVAL;
+ break;
}
+
+done:
+ mutex_unlock(&adev->benchmark_mutex);
+
+ return r;
}
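With the switch to ktime above, each benchmark pass measures the elapsed milliseconds for n buffer copies and logs (n * size_in_KiB) / time_ms, which effectively reports KiB per millisecond as MB/s (and eight times that as Mb/s). A small standalone sketch of that arithmetic:

/* Standalone sketch of the throughput math in amdgpu_benchmark_log_results():
 * n copies of size bytes in time_ms milliseconds. */
#include <stdint.h>
#include <stdio.h>

static int64_t throughput_mbps(int n, unsigned int size_bytes, int64_t time_ms)
{
	int64_t kib_moved = (int64_t)n * (size_bytes >> 10);

	if (time_ms <= 0)
		return 0;	/* guard the division done with div64_s64() above */
	return kib_moved / time_ms;	/* KiB/ms, reported as MB/s */
}

int main(void)
{
	/* e.g. 1024 moves of 1 MiB completing in 250 ms */
	int64_t mbs = throughput_mbps(1024, 1024 * 1024, 250);

	printf("throughput: %lld Mb/s or %lld MB/s\n",
	       (long long)mbs * 8, (long long)mbs);
	return 0;
}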
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 365e735f6647..00e96419fcda 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -25,10 +25,12 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "atom.h"
+#include <linux/device.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/acpi.h>
/*
@@ -45,58 +47,48 @@
/* Check if current bios is an ATOM BIOS.
* Return true if it is ATOM BIOS. Otherwise, return false.
*/
-static bool check_atom_bios(uint8_t *bios, size_t size)
+static bool check_atom_bios(struct amdgpu_device *adev, size_t size)
{
uint16_t tmp, bios_header_start;
+ uint8_t *bios = adev->bios;
if (!bios || size < 0x49) {
- DRM_INFO("vbios mem is null or mem size is wrong\n");
+ dev_dbg(adev->dev, "VBIOS mem is null or mem size is wrong\n");
return false;
}
if (!AMD_IS_VALID_VBIOS(bios)) {
- DRM_INFO("BIOS signature incorrect %x %x\n", bios[0], bios[1]);
- return false;
- }
-
- tmp = bios[0x18] | (bios[0x19] << 8);
- if (bios[tmp + 0x14] != 0x0) {
- DRM_INFO("Not an x86 BIOS ROM\n");
+ dev_dbg(adev->dev, "VBIOS signature incorrect %x %x\n", bios[0],
+ bios[1]);
return false;
}
bios_header_start = bios[0x48] | (bios[0x49] << 8);
if (!bios_header_start) {
- DRM_INFO("Can't locate bios header\n");
+ dev_dbg(adev->dev, "Can't locate VBIOS header\n");
return false;
}
tmp = bios_header_start + 4;
if (size < tmp) {
- DRM_INFO("BIOS header is broken\n");
+ dev_dbg(adev->dev, "VBIOS header is broken\n");
return false;
}
if (!memcmp(bios + tmp, "ATOM", 4) ||
!memcmp(bios + tmp, "MOTA", 4)) {
- DRM_DEBUG("ATOMBIOS detected\n");
+ dev_dbg(adev->dev, "ATOMBIOS detected\n");
return true;
}
return false;
}
-static bool is_atom_fw(uint8_t *bios)
+void amdgpu_bios_release(struct amdgpu_device *adev)
{
- uint16_t bios_header_start = bios[0x48] | (bios[0x49] << 8);
- uint8_t frev = bios[bios_header_start + 2];
- uint8_t crev = bios[bios_header_start + 3];
-
- if ((frev < 3) ||
- ((frev == 3) && (crev < 3)))
- return false;
-
- return true;
+ kfree(adev->bios);
+ adev->bios = NULL;
+ adev->bios_size = 0;
}
/* If you boot an IGP board with a discrete card as the primary,
@@ -104,23 +96,27 @@ static bool is_atom_fw(uint8_t *bios)
* part of the system bios. On boot, the system bios puts a
* copy of the igp rom at the start of vram if a discrete card is
* present.
+ * For SR-IOV, the vbios image is also put in VRAM in the VF.
*/
-static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
+static bool amdgpu_read_bios_from_vram(struct amdgpu_device *adev)
{
uint8_t __iomem *bios;
resource_size_t vram_base;
resource_size_t size = 256 * 1024; /* ??? */
if (!(adev->flags & AMD_IS_APU))
- if (amdgpu_need_post(adev))
+ if (amdgpu_device_need_post(adev))
return false;
+ /* FB BAR not enabled */
+ if (pci_resource_len(adev->pdev, 0) == 0)
+ return false;
+
adev->bios = NULL;
vram_base = pci_resource_start(adev->pdev, 0);
- bios = ioremap(vram_base, size);
- if (!bios) {
+ bios = ioremap_wc(vram_base, size);
+ if (!bios)
return false;
- }
adev->bios = kmalloc(size, GFP_KERNEL);
if (!adev->bios) {
@@ -131,8 +127,8 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
memcpy_fromio(adev->bios, bios, size);
iounmap(bios);
- if (!check_atom_bios(adev->bios, size)) {
- kfree(adev->bios);
+ if (!check_atom_bios(adev, size)) {
+ amdgpu_bios_release(adev);
return false;
}
@@ -147,9 +143,8 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
adev->bios = NULL;
/* XXX: some cards may return 0 for rom size? ddx has a workaround */
bios = pci_map_rom(adev->pdev, &size);
- if (!bios) {
+ if (!bios)
return false;
- }
adev->bios = kzalloc(size, GFP_KERNEL);
if (adev->bios == NULL) {
@@ -160,8 +155,8 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
memcpy_fromio(adev->bios, bios, size);
pci_unmap_rom(adev->pdev, bios);
- if (!check_atom_bios(adev->bios, size)) {
- kfree(adev->bios);
+ if (!check_atom_bios(adev, size)) {
+ amdgpu_bios_release(adev);
return false;
}
@@ -173,7 +168,7 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
u8 header[AMD_VBIOS_SIGNATURE_END+1] = {0};
int len;
- if (!adev->asic_funcs->read_bios_from_rom)
+ if (!adev->asic_funcs || !adev->asic_funcs->read_bios_from_rom)
return false;
/* validate VBIOS signature */
@@ -182,9 +177,9 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
header[AMD_VBIOS_SIGNATURE_END] = 0;
if ((!AMD_IS_VALID_VBIOS(header)) ||
- 0 != memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET],
- AMD_VBIOS_SIGNATURE,
- strlen(AMD_VBIOS_SIGNATURE)))
+ memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET],
+ AMD_VBIOS_SIGNATURE,
+ strlen(AMD_VBIOS_SIGNATURE)) != 0)
return false;
/* valid vbios, go on */
@@ -200,8 +195,8 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
/* read complete BIOS */
amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
- if (!check_atom_bios(adev->bios, len)) {
- kfree(adev->bios);
+ if (!check_atom_bios(adev, len)) {
+ amdgpu_bios_release(adev);
return false;
}
@@ -210,30 +205,36 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
{
- uint8_t __iomem *bios;
- size_t size;
+ phys_addr_t rom = adev->pdev->rom;
+ size_t romlen = adev->pdev->romlen;
+ void __iomem *bios;
adev->bios = NULL;
- bios = pci_platform_rom(adev->pdev, &size);
- if (!bios) {
+ if (!rom || romlen == 0)
return false;
- }
- adev->bios = kzalloc(size, GFP_KERNEL);
- if (adev->bios == NULL)
+ adev->bios = kzalloc(romlen, GFP_KERNEL);
+ if (!adev->bios)
return false;
- memcpy_fromio(adev->bios, bios, size);
+ bios = ioremap(rom, romlen);
+ if (!bios)
+ goto free_bios;
- if (!check_atom_bios(adev->bios, size)) {
- kfree(adev->bios);
- return false;
- }
+ memcpy_fromio(adev->bios, bios, romlen);
+ iounmap(bios);
- adev->bios_size = size;
+ if (!check_atom_bios(adev, romlen))
+ goto free_bios;
+
+ adev->bios_size = romlen;
return true;
+free_bios:
+ amdgpu_bios_release(adev);
+
+ return false;
}
#ifdef CONFIG_ACPI
@@ -273,7 +274,7 @@ static int amdgpu_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
if (ACPI_FAILURE(status)) {
- printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+ DRM_ERROR("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
return -ENODEV;
}
@@ -294,42 +295,33 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
acpi_status status;
bool found = false;
- /* ATRM is for the discrete card only */
- if (adev->flags & AMD_IS_APU)
+ /* ATRM is for on-platform devices only */
+ if (dev_is_removable(&adev->pdev->dev))
return false;
- while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ while ((pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev))) {
+ if ((pdev->class != PCI_CLASS_DISPLAY_VGA << 8) &&
+ (pdev->class != PCI_CLASS_DISPLAY_OTHER << 8))
+ continue;
+
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
continue;
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
- if (!ACPI_FAILURE(status)) {
+ if (ACPI_SUCCESS(status)) {
found = true;
break;
}
}
- if (!found) {
- while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
- dhandle = ACPI_HANDLE(&pdev->dev);
- if (!dhandle)
- continue;
-
- status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
- if (!ACPI_FAILURE(status)) {
- found = true;
- break;
- }
- }
- }
-
if (!found)
return false;
+ pci_dev_put(pdev);
adev->bios = kmalloc(size, GFP_KERNEL);
if (!adev->bios) {
- DRM_ERROR("Unable to allocate bios\n");
+ dev_err(adev->dev, "Unable to allocate bios\n");
return false;
}
@@ -342,8 +334,8 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
break;
}
- if (!check_atom_bios(adev->bios, size)) {
- kfree(adev->bios);
+ if (!check_atom_bios(adev, size)) {
+ amdgpu_bios_release(adev);
return false;
}
adev->bios_size = size;
@@ -358,10 +350,8 @@ static inline bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
static bool amdgpu_read_disabled_bios(struct amdgpu_device *adev)
{
- if (adev->flags & AMD_IS_APU)
- return igp_read_bios_from_vram(adev);
- else
- return amdgpu_asic_read_disabled_bios(adev);
+ return (!adev->asic_funcs || !adev->asic_funcs->read_disabled_bios) ?
+ false : amdgpu_asic_read_disabled_bios(adev);
}
#ifdef CONFIG_ACPI
@@ -370,13 +360,13 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
struct acpi_table_header *hdr;
acpi_size tbl_size;
UEFI_ACPI_VFCT *vfct;
- unsigned offset;
+ unsigned int offset;
if (!ACPI_SUCCESS(acpi_get_table("VFCT", 1, &hdr)))
return false;
tbl_size = hdr->length;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
- DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+ dev_info(adev->dev, "ACPI VFCT table present but broken (too short #1),skipping\n");
return false;
}
@@ -389,13 +379,13 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
offset += sizeof(VFCT_IMAGE_HEADER);
if (offset > tbl_size) {
- DRM_ERROR("ACPI VFCT image header truncated\n");
+ dev_info(adev->dev, "ACPI VFCT image header truncated,skipping\n");
return false;
}
offset += vhdr->ImageLength;
if (offset > tbl_size) {
- DRM_ERROR("ACPI VFCT image truncated\n");
+ dev_info(adev->dev, "ACPI VFCT image truncated,skipping\n");
return false;
}
@@ -409,8 +399,8 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
vhdr->ImageLength,
GFP_KERNEL);
- if (!check_atom_bios(adev->bios, vhdr->ImageLength)) {
- kfree(adev->bios);
+ if (!check_atom_bios(adev, vhdr->ImageLength)) {
+ amdgpu_bios_release(adev);
return false;
}
adev->bios_size = vhdr->ImageLength;
@@ -418,7 +408,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
}
}
- DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+ dev_info(adev->dev, "ACPI VFCT table present but broken (too short #2),skipping\n");
return false;
}
#else
@@ -428,33 +418,158 @@ static inline bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
}
#endif
-bool amdgpu_get_bios(struct amdgpu_device *adev)
+static bool amdgpu_get_bios_apu(struct amdgpu_device *adev)
{
- if (amdgpu_atrm_get_bios(adev))
+ if (amdgpu_acpi_vfct_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from VFCT\n");
goto success;
+ }
+
+ if (amdgpu_read_bios_from_vram(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n");
+ goto success;
+ }
- if (amdgpu_acpi_vfct_bios(adev))
+ if (amdgpu_read_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
goto success;
+ }
+
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
+ goto success;
+ }
+
+ dev_err(adev->dev, "Unable to locate a BIOS ROM\n");
+ return false;
+
+success:
+ return true;
+}
+
+static bool amdgpu_prefer_rom_resource(struct amdgpu_device *adev)
+{
+ struct resource *res = &adev->pdev->resource[PCI_ROM_RESOURCE];
+
+ return (res->flags & IORESOURCE_ROM_SHADOW);
+}
- if (igp_read_bios_from_vram(adev))
+static bool amdgpu_get_bios_dgpu(struct amdgpu_device *adev)
+{
+ if (amdgpu_atrm_get_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ATRM\n");
goto success;
+ }
- if (amdgpu_read_bios(adev))
+ if (amdgpu_acpi_vfct_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from VFCT\n");
goto success;
+ }
- if (amdgpu_read_bios_from_rom(adev))
+ /* this is required for SR-IOV */
+ if (amdgpu_read_bios_from_vram(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from VRAM BAR\n");
goto success;
+ }
+
+ if (amdgpu_prefer_rom_resource(adev)) {
+ if (amdgpu_read_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
+ goto success;
+ }
+
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
+ goto success;
+ }
+
+ } else {
+ if (amdgpu_read_platform_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from platform\n");
+ goto success;
+ }
+
+ if (amdgpu_read_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM BAR\n");
+ goto success;
+ }
+ }
- if (amdgpu_read_disabled_bios(adev))
+ if (amdgpu_read_bios_from_rom(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from ROM\n");
goto success;
+ }
- if (amdgpu_read_platform_bios(adev))
+ if (amdgpu_read_disabled_bios(adev)) {
+ dev_info(adev->dev, "Fetched VBIOS from disabled ROM BAR\n");
goto success;
+ }
- DRM_ERROR("Unable to locate a BIOS ROM\n");
+ dev_err(adev->dev, "Unable to locate a BIOS ROM\n");
return false;
success:
- adev->is_atom_fw = is_atom_fw(adev->bios);
+ return true;
+}
+
+bool amdgpu_get_bios(struct amdgpu_device *adev)
+{
+ bool found;
+
+ if (adev->flags & AMD_IS_APU)
+ found = amdgpu_get_bios_apu(adev);
+ else
+ found = amdgpu_get_bios_dgpu(adev);
+
+ if (found)
+ adev->is_atom_fw = adev->asic_type >= CHIP_VEGA10;
+
+ return found;
+}
+
+/* helper function for soc15 and onwards to read bios from rom */
+bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev,
+ u8 *bios, u32 length_bytes)
+{
+ u32 *dw_ptr;
+ u32 i, length_dw;
+ u32 rom_offset;
+ u32 rom_index_offset;
+ u32 rom_data_offset;
+
+ if (bios == NULL)
+ return false;
+ if (length_bytes == 0)
+ return false;
+ /* APU vbios image is part of sbios image */
+ if (adev->flags & AMD_IS_APU)
+ return false;
+ if (!adev->smuio.funcs ||
+ !adev->smuio.funcs->get_rom_index_offset ||
+ !adev->smuio.funcs->get_rom_data_offset)
+ return false;
+
+ dw_ptr = (u32 *)bios;
+ length_dw = ALIGN(length_bytes, 4) / 4;
+
+ rom_index_offset =
+ adev->smuio.funcs->get_rom_index_offset(adev);
+ rom_data_offset =
+ adev->smuio.funcs->get_rom_data_offset(adev);
+
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->get_rom_offset) {
+ rom_offset = adev->nbio.funcs->get_rom_offset(adev);
+ rom_offset = rom_offset << 17;
+ } else {
+ rom_offset = 0;
+ }
+
+ /* set rom index to rom_offset */
+ WREG32(rom_index_offset, rom_offset);
+ /* read out the rom data */
+ for (i = 0; i < length_dw; i++)
+ dw_ptr[i] = RREG32(rom_data_offset);
+
return true;
}
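The new amdgpu_soc15_read_bios_from_rom() helper uses an index/data register pair: the ROM offset is written once to the index register and the data register is then read length_dw times, each read returning the next dword (the loop implies the index advances automatically). A standalone sketch of that access pattern, with plain arrays standing in for the registers:

/* Standalone sketch of the index/data ROM read loop; the "registers" are
 * simulated so the example stays self-contained. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t fake_rom[4] = { 0xaa55aa55, 0x11111111, 0x22222222, 0x33333333 };
static uint32_t rom_index;

static void wreg_index(uint32_t val) { rom_index = val; }
static uint32_t rreg_data(void)      { return fake_rom[rom_index++]; }

static void read_rom(uint8_t *dst, uint32_t length_bytes)
{
	uint32_t length_dw = (length_bytes + 3) / 4;	/* ALIGN(len, 4) / 4 */
	uint32_t i, dw;

	wreg_index(0);			/* set rom index to the start offset */
	for (i = 0; i < length_dw; i++) {
		dw = rreg_data();	/* each read returns the next dword */
		memcpy(dst + i * 4, &dw, 4);
	}
}

int main(void)
{
	uint8_t bios[16];
	uint32_t first;

	read_rom(bios, sizeof(bios));
	memcpy(&first, bios, 4);
	printf("first dword: 0x%08x\n", (unsigned int)first);
	return 0;
}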
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index a6649874e6ce..66fb37b64388 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -28,78 +28,64 @@
* Christian König <deathsimple@vodafone.de>
*/
-#include <drm/drmP.h>
+#include <linux/sort.h>
+#include <linux/uaccess.h>
+
#include "amdgpu.h"
#include "amdgpu_trace.h"
#define AMDGPU_BO_LIST_MAX_PRIORITY 32u
#define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1)
-static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
- struct amdgpu_bo_list **result,
- int *id)
+static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
{
- int r;
-
- *result = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
- if (!*result)
- return -ENOMEM;
-
- mutex_lock(&fpriv->bo_list_lock);
- r = idr_alloc(&fpriv->bo_list_handles, *result,
- 1, 0, GFP_KERNEL);
- if (r < 0) {
- mutex_unlock(&fpriv->bo_list_lock);
- kfree(*result);
- return r;
- }
- *id = r;
-
- mutex_init(&(*result)->lock);
- (*result)->num_entries = 0;
- (*result)->array = NULL;
+ struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
+ rhead);
+ mutex_destroy(&list->bo_list_mutex);
+ kvfree(list);
+}
- mutex_lock(&(*result)->lock);
- mutex_unlock(&fpriv->bo_list_lock);
+static void amdgpu_bo_list_free(struct kref *ref)
+{
+ struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
+ refcount);
+ struct amdgpu_bo_list_entry *e;
- return 0;
+ amdgpu_bo_list_for_each_entry(e, list)
+ amdgpu_bo_unref(&e->bo);
+ call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
}
-static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
+static int amdgpu_bo_list_entry_cmp(const void *_a, const void *_b)
{
- struct amdgpu_bo_list *list;
+ const struct amdgpu_bo_list_entry *a = _a, *b = _b;
- mutex_lock(&fpriv->bo_list_lock);
- list = idr_remove(&fpriv->bo_list_handles, id);
- if (list) {
- /* Another user may have a reference to this list still */
- mutex_lock(&list->lock);
- mutex_unlock(&list->lock);
- amdgpu_bo_list_free(list);
- }
- mutex_unlock(&fpriv->bo_list_lock);
+ if (a->priority > b->priority)
+ return 1;
+ if (a->priority < b->priority)
+ return -1;
+ return 0;
}
-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct amdgpu_bo_list *list,
- struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries)
+int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
+ struct drm_amdgpu_bo_list_entry *info,
+ size_t num_entries, struct amdgpu_bo_list **result)
{
- struct amdgpu_bo_list_entry *array;
- struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
- struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
- struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
-
unsigned last_entry = 0, first_userptr = num_entries;
+ struct amdgpu_bo_list_entry *array;
+ struct amdgpu_bo_list *list;
+ uint64_t total_size = 0;
unsigned i;
int r;
- unsigned long total_size = 0;
- array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
- if (!array)
+ list = kvzalloc(struct_size(list, entries, num_entries), GFP_KERNEL);
+ if (!list)
return -ENOMEM;
- memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
+
+ kref_init(&list->refcount);
+
+ list->num_entries = num_entries;
+ array = list->entries;
for (i = 0; i < num_entries; ++i) {
struct amdgpu_bo_list_entry *entry;
@@ -114,7 +100,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
}
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put(gobj);
usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
if (usermm) {
@@ -128,161 +114,138 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
entry = &array[last_entry++];
}
- entry->robj = bo;
entry->priority = min(info[i].bo_priority,
AMDGPU_BO_LIST_MAX_PRIORITY);
- entry->tv.bo = &entry->robj->tbo;
- entry->tv.shared = !entry->robj->prime_shared_count;
-
- if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
- gds_obj = entry->robj;
- if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
- gws_obj = entry->robj;
- if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
- oa_obj = entry->robj;
-
- total_size += amdgpu_bo_size(entry->robj);
- trace_amdgpu_bo_list_set(list, entry->robj);
- }
+ entry->bo = bo;
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
+ if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
+ list->gds_obj = bo;
+ if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
+ list->gws_obj = bo;
+ if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
+ list->oa_obj = bo;
- drm_free_large(list->array);
+ total_size += amdgpu_bo_size(bo);
+ trace_amdgpu_bo_list_set(list, bo);
+ }
- list->gds_obj = gds_obj;
- list->gws_obj = gws_obj;
- list->oa_obj = oa_obj;
list->first_userptr = first_userptr;
- list->array = array;
- list->num_entries = num_entries;
+ sort(array, last_entry, sizeof(struct amdgpu_bo_list_entry),
+ amdgpu_bo_list_entry_cmp, NULL);
trace_amdgpu_cs_bo_status(list->num_entries, total_size);
+
+ mutex_init(&list->bo_list_mutex);
+ *result = list;
return 0;
error_free:
- while (i--)
- amdgpu_bo_unref(&array[i].robj);
- drm_free_large(array);
+ for (i = 0; i < last_entry; ++i)
+ amdgpu_bo_unref(&array[i].bo);
+ for (i = first_userptr; i < num_entries; ++i)
+ amdgpu_bo_unref(&array[i].bo);
+ kvfree(list);
return r;
+
}
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
+static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
- struct amdgpu_bo_list *result;
+ struct amdgpu_bo_list *list;
mutex_lock(&fpriv->bo_list_lock);
- result = idr_find(&fpriv->bo_list_handles, id);
- if (result)
- mutex_lock(&result->lock);
+ list = idr_remove(&fpriv->bo_list_handles, id);
mutex_unlock(&fpriv->bo_list_lock);
- return result;
+ if (list)
+ kref_put(&list->refcount, amdgpu_bo_list_free);
}
-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
- struct list_head *validated)
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ struct amdgpu_bo_list **result)
{
- /* This is based on the bucket sort with O(n) time complexity.
- * An item with priority "i" is added to bucket[i]. The lists are then
- * concatenated in descending order.
- */
- struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
- unsigned i;
-
- for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
- INIT_LIST_HEAD(&bucket[i]);
-
- /* Since buffers which appear sooner in the relocation list are
- * likely to be used more often than buffers which appear later
- * in the list, the sort mustn't change the ordering of buffers
- * with the same priority, i.e. it must be stable.
- */
- for (i = 0; i < list->num_entries; i++) {
- unsigned priority = list->array[i].priority;
+ rcu_read_lock();
+ *result = idr_find(&fpriv->bo_list_handles, id);
- list_add_tail(&list->array[i].tv.head,
- &bucket[priority]);
- list->array[i].user_pages = NULL;
+ if (*result && kref_get_unless_zero(&(*result)->refcount)) {
+ rcu_read_unlock();
+ return 0;
}
- /* Connect the sorted buckets in the output list. */
- for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
- list_splice(&bucket[i], validated);
+ rcu_read_unlock();
+ *result = NULL;
+ return -ENOENT;
}
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
- mutex_unlock(&list->lock);
+ kref_put(&list->refcount, amdgpu_bo_list_free);
}
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ struct drm_amdgpu_bo_list_entry **info_param)
{
- unsigned i;
+ const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
+ const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
+ const uint32_t bo_info_size = in->bo_info_size;
+ const uint32_t bo_number = in->bo_number;
+ struct drm_amdgpu_bo_list_entry *info;
+
+ /* copy the handle array from userspace to a kernel buffer */
+ if (likely(info_size == bo_info_size)) {
+ info = vmemdup_array_user(uptr, bo_number, info_size);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+ } else {
+ const uint32_t bytes = min(bo_info_size, info_size);
+ unsigned i;
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
+ info = kvmalloc_array(bo_number, info_size, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- mutex_destroy(&list->lock);
- drm_free_large(list->array);
- kfree(list);
+ memset(info, 0, bo_number * info_size);
+ for (i = 0; i < bo_number; ++i, uptr += bo_info_size) {
+ if (copy_from_user(&info[i], uptr, bytes)) {
+ kvfree(info);
+ return -EFAULT;
+ }
+ }
+ }
+
+ *info_param = info;
+ return 0;
}
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
-
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_bo_list *args = data;
uint32_t handle = args->in.list_handle;
- const void __user *uptr = (const void*)(uintptr_t)args->in.bo_info_ptr;
-
- struct drm_amdgpu_bo_list_entry *info;
- struct amdgpu_bo_list *list;
-
+ struct drm_amdgpu_bo_list_entry *info = NULL;
+ struct amdgpu_bo_list *list, *old;
int r;
- info = drm_malloc_ab(args->in.bo_number,
- sizeof(struct drm_amdgpu_bo_list_entry));
- if (!info)
- return -ENOMEM;
-
- /* copy the handle array from userspace to a kernel buffer */
- r = -EFAULT;
- if (likely(info_size == args->in.bo_info_size)) {
- unsigned long bytes = args->in.bo_number *
- args->in.bo_info_size;
-
- if (copy_from_user(info, uptr, bytes))
- goto error_free;
-
- } else {
- unsigned long bytes = min(args->in.bo_info_size, info_size);
- unsigned i;
-
- memset(info, 0, args->in.bo_number * info_size);
- for (i = 0; i < args->in.bo_number; ++i) {
- if (copy_from_user(&info[i], uptr, bytes))
- goto error_free;
-
- uptr += args->in.bo_info_size;
- }
- }
+ r = amdgpu_bo_create_list_entry_array(&args->in, &info);
+ if (r)
+ return r;
switch (args->in.operation) {
case AMDGPU_BO_LIST_OP_CREATE:
- r = amdgpu_bo_list_create(fpriv, &list, &handle);
+ r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
+ &list);
if (r)
goto error_free;
- r = amdgpu_bo_list_set(adev, filp, list, info,
- args->in.bo_number);
- amdgpu_bo_list_put(list);
- if (r)
- goto error_free;
+ mutex_lock(&fpriv->bo_list_lock);
+ r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
+ mutex_unlock(&fpriv->bo_list_lock);
+ if (r < 0) {
+ goto error_put_list;
+ }
+ handle = r;
break;
case AMDGPU_BO_LIST_OP_DESTROY:
@@ -291,17 +254,21 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
break;
case AMDGPU_BO_LIST_OP_UPDATE:
- r = -ENOENT;
- list = amdgpu_bo_list_get(fpriv, handle);
- if (!list)
- goto error_free;
-
- r = amdgpu_bo_list_set(adev, filp, list, info,
- args->in.bo_number);
- amdgpu_bo_list_put(list);
+ r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
+ &list);
if (r)
goto error_free;
+ mutex_lock(&fpriv->bo_list_lock);
+ old = idr_replace(&fpriv->bo_list_handles, list, handle);
+ mutex_unlock(&fpriv->bo_list_lock);
+
+ if (IS_ERR(old)) {
+ r = PTR_ERR(old);
+ goto error_put_list;
+ }
+
+ amdgpu_bo_list_put(old);
break;
default:
@@ -311,11 +278,14 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
memset(args, 0, sizeof(*args));
args->out.list_handle = handle;
- drm_free_large(info);
+ kvfree(info);
return 0;
+error_put_list:
+ amdgpu_bo_list_put(list);
+
error_free:
- drm_free_large(info);
+ kvfree(info);
return r;
}
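amdgpu_bo_create_list_entry_array() keeps the ioctl compatible with userspace that passes a different per-entry struct size: when the sizes match it imports the whole array in one copy, otherwise it copies min(bo_info_size, info_size) bytes per entry into a zeroed buffer so unknown fields read back as zero. A standalone sketch of the mismatched-size branch:

/* Standalone sketch of the size-tolerant array import; struct and field names
 * are illustrative, not the real drm_amdgpu_bo_list_entry layout. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry_v2 { uint32_t handle; uint32_t priority; uint32_t new_field; };

static struct entry_v2 *import_entries(const void *user, uint32_t user_size,
				       uint32_t count)
{
	const uint32_t bytes = user_size < sizeof(struct entry_v2) ?
			       user_size : sizeof(struct entry_v2);
	struct entry_v2 *out = calloc(count, sizeof(*out));
	uint32_t i;

	if (!out)
		return NULL;
	for (i = 0; i < count; i++)	/* any unknown tail stays zeroed */
		memcpy(&out[i], (const char *)user + i * user_size, bytes);
	return out;
}

int main(void)
{
	/* older userspace: entries without new_field */
	struct { uint32_t handle, priority; } old[2] = { { 1, 10 }, { 2, 20 } };
	struct entry_v2 *e = import_entries(old, sizeof(old[0]), 2);

	if (!e)
		return 1;
	printf("handle=%u priority=%u new_field=%u\n",
	       e[0].handle, e[0].priority, e[0].new_field);
	free(e);
	return 0;
}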
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
new file mode 100644
index 000000000000..a716c9886c74
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_BO_LIST_H__
+#define __AMDGPU_BO_LIST_H__
+
+#include <drm/amdgpu_drm.h>
+
+struct hmm_range;
+
+struct drm_file;
+
+struct amdgpu_device;
+struct amdgpu_bo;
+struct amdgpu_bo_va;
+struct amdgpu_fpriv;
+
+struct amdgpu_bo_list_entry {
+ struct amdgpu_bo *bo;
+ struct amdgpu_bo_va *bo_va;
+ uint32_t priority;
+ struct hmm_range *range;
+ bool user_invalidated;
+};
+
+struct amdgpu_bo_list {
+ struct rcu_head rhead;
+ struct kref refcount;
+ struct amdgpu_bo *gds_obj;
+ struct amdgpu_bo *gws_obj;
+ struct amdgpu_bo *oa_obj;
+ unsigned first_userptr;
+ unsigned num_entries;
+
+ /* Protect access during command submission.
+ */
+ struct mutex bo_list_mutex;
+
+ struct amdgpu_bo_list_entry entries[] __counted_by(num_entries);
+};
+
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ struct amdgpu_bo_list **result);
+void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ struct drm_amdgpu_bo_list_entry **info_param);
+
+int amdgpu_bo_list_create(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ struct drm_amdgpu_bo_list_entry *info,
+ size_t num_entries,
+ struct amdgpu_bo_list **list);
+
+#define amdgpu_bo_list_for_each_entry(e, list) \
+ for (e = list->entries; \
+ e != &list->entries[list->num_entries]; \
+ ++e)
+
+#define amdgpu_bo_list_for_each_userptr_entry(e, list) \
+ for (e = &list->entries[list->first_userptr]; \
+ e != &list->entries[list->num_entries]; \
+ ++e)
+
+#endif
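The new header keeps the entries directly inside struct amdgpu_bo_list as a flexible array member, so a single struct_size()-style allocation holds the header and all entries, and the for-each macros iterate from entries[0] to entries[num_entries]. A simplified standalone sketch of that layout and iterator, using stand-in types rather than the driver structures:

/* Standalone sketch of a flexible-array list and its for-each iterator. */
#include <stdio.h>
#include <stdlib.h>

struct item { unsigned int priority; };

struct item_list {
	unsigned int num_entries;
	struct item entries[];		/* flexible array member */
};

#define item_list_for_each_entry(e, list) \
	for (e = (list)->entries; e != &(list)->entries[(list)->num_entries]; ++e)

int main(void)
{
	unsigned int n = 3, i;
	/* one allocation holds header + entries, like struct_size() in the kernel */
	struct item_list *list = malloc(sizeof(*list) + n * sizeof(struct item));
	struct item *e;

	if (!list)
		return 1;
	list->num_entries = n;
	for (i = 0; i < n; i++)
		list->entries[i].priority = i;

	item_list_for_each_entry(e, list)
		printf("priority %u\n", e->priority);

	free(list);
	return 0;
}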
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index c6dba1eaefbd..004a6a9d6b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -22,14 +22,12 @@
*
*/
#include <linux/list.h>
-#include <linux/slab.h>
#include <linux/pci.h>
-#include <linux/acpi.h>
-#include <drm/drmP.h>
+#include <linux/slab.h>
+
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
-#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"
@@ -42,180 +40,14 @@ struct amdgpu_cgs_device {
struct amdgpu_device *adev = \
((struct amdgpu_cgs_device *)cgs_device)->adev
-static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
- enum cgs_gpu_mem_type type,
- uint64_t size, uint64_t align,
- uint64_t min_offset, uint64_t max_offset,
- cgs_handle_t *handle)
-{
- CGS_FUNC_ADEV;
- uint16_t flags = 0;
- int ret = 0;
- uint32_t domain = 0;
- struct amdgpu_bo *obj;
- struct ttm_placement placement;
- struct ttm_place place;
-
- if (min_offset > max_offset) {
- BUG_ON(1);
- return -EINVAL;
- }
-
- /* fail if the alignment is not a power of 2 */
- if (((align != 1) && (align & (align - 1)))
- || size == 0 || align == 0)
- return -EINVAL;
-
-
- switch(type) {
- case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
- case CGS_GPU_MEM_TYPE__VISIBLE_FB:
- flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (max_offset > adev->mc.real_vram_size)
- return -EINVAL;
- place.fpfn = min_offset >> PAGE_SHIFT;
- place.lpfn = max_offset >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
- break;
- case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
- case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
- flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
- place.fpfn =
- max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
- place.lpfn =
- min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
- }
-
- break;
- case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
- domain = AMDGPU_GEM_DOMAIN_GTT;
- place.fpfn = min_offset >> PAGE_SHIFT;
- place.lpfn = max_offset >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
- break;
- case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
- flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
- domain = AMDGPU_GEM_DOMAIN_GTT;
- place.fpfn = min_offset >> PAGE_SHIFT;
- place.lpfn = max_offset >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
- TTM_PL_FLAG_UNCACHED;
- break;
- default:
- return -EINVAL;
- }
-
- *handle = 0;
-
- placement.placement = &place;
- placement.num_placement = 1;
- placement.busy_placement = &place;
- placement.num_busy_placement = 1;
-
- ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
- true, domain, flags,
- NULL, &placement, NULL,
- &obj);
- if (ret) {
- DRM_ERROR("(%d) bo create failed\n", ret);
- return ret;
- }
- *handle = (cgs_handle_t)obj;
-
- return ret;
-}
-
-static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
-{
- struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
-
- if (obj) {
- int r = amdgpu_bo_reserve(obj, true);
- if (likely(r == 0)) {
- amdgpu_bo_kunmap(obj);
- amdgpu_bo_unpin(obj);
- amdgpu_bo_unreserve(obj);
- }
- amdgpu_bo_unref(&obj);
-
- }
- return 0;
-}
-
-static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
- uint64_t *mcaddr)
-{
- int r;
- u64 min_offset, max_offset;
- struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
-
- WARN_ON_ONCE(obj->placement.num_placement > 1);
-
- min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
- max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
-
- r = amdgpu_bo_reserve(obj, true);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
- min_offset, max_offset, mcaddr);
- amdgpu_bo_unreserve(obj);
- return r;
-}
-
-static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
-{
- int r;
- struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
- r = amdgpu_bo_reserve(obj, true);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_unpin(obj);
- amdgpu_bo_unreserve(obj);
- return r;
-}
-
-static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
- void **map)
-{
- int r;
- struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
- r = amdgpu_bo_reserve(obj, true);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_kmap(obj, map);
- amdgpu_bo_unreserve(obj);
- return r;
-}
-
-static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
-{
- int r;
- struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
- r = amdgpu_bo_reserve(obj, true);
- if (unlikely(r != 0))
- return r;
- amdgpu_bo_kunmap(obj);
- amdgpu_bo_unreserve(obj);
- return r;
-}
-
-static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
+static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned int offset)
{
CGS_FUNC_ADEV;
return RREG32(offset);
}
-static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
+static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned int offset,
uint32_t value)
{
CGS_FUNC_ADEV;
@@ -224,12 +56,10 @@ static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned of
static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
enum cgs_ind_reg space,
- unsigned index)
+ unsigned int index)
{
CGS_FUNC_ADEV;
switch (space) {
- case CGS_IND_REG__MMIO:
- return RREG32_IDX(index);
case CGS_IND_REG__PCIE:
return RREG32_PCIE(index);
case CGS_IND_REG__SMC:
@@ -240,9 +70,13 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
return RREG32_DIDT(index);
case CGS_IND_REG_GC_CAC:
return RREG32_GC_CAC(index);
+ case CGS_IND_REG_SE_CAC:
+ return RREG32_SE_CAC(index);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return 0;
+ default:
+ BUG();
}
WARN(1, "Invalid indirect register space");
return 0;
@@ -250,12 +84,10 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
enum cgs_ind_reg space,
- unsigned index, uint32_t value)
+ unsigned int index, uint32_t value)
{
CGS_FUNC_ADEV;
switch (space) {
- case CGS_IND_REG__MMIO:
- return WREG32_IDX(index, value);
case CGS_IND_REG__PCIE:
return WREG32_PCIE(index, value);
case CGS_IND_REG__SMC:
@@ -266,231 +98,17 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
return WREG32_DIDT(index, value);
case CGS_IND_REG_GC_CAC:
return WREG32_GC_CAC(index, value);
+ case CGS_IND_REG_SE_CAC:
+ return WREG32_SE_CAC(index, value);
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return;
- }
- WARN(1, "Invalid indirect register space");
-}
-
-static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
- enum cgs_resource_type resource_type,
- uint64_t size,
- uint64_t offset,
- uint64_t *resource_base)
-{
- CGS_FUNC_ADEV;
-
- if (resource_base == NULL)
- return -EINVAL;
-
- switch (resource_type) {
- case CGS_RESOURCE_TYPE_MMIO:
- if (adev->rmmio_size == 0)
- return -ENOENT;
- if ((offset + size) > adev->rmmio_size)
- return -EINVAL;
- *resource_base = adev->rmmio_base;
- return 0;
- case CGS_RESOURCE_TYPE_DOORBELL:
- if (adev->doorbell.size == 0)
- return -ENOENT;
- if ((offset + size) > adev->doorbell.size)
- return -EINVAL;
- *resource_base = adev->doorbell.base;
- return 0;
- case CGS_RESOURCE_TYPE_FB:
- case CGS_RESOURCE_TYPE_IO:
- case CGS_RESOURCE_TYPE_ROM:
default:
- return -EINVAL;
- }
-}
-
-static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
- unsigned table, uint16_t *size,
- uint8_t *frev, uint8_t *crev)
-{
- CGS_FUNC_ADEV;
- uint16_t data_start;
-
- if (amdgpu_atom_parse_data_header(
- adev->mode_info.atom_context, table, size,
- frev, crev, &data_start))
- return (uint8_t*)adev->mode_info.atom_context->bios +
- data_start;
-
- return NULL;
-}
-
-static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
- uint8_t *frev, uint8_t *crev)
-{
- CGS_FUNC_ADEV;
-
- if (amdgpu_atom_parse_cmd_header(
- adev->mode_info.atom_context, table,
- frev, crev))
- return 0;
-
- return -EINVAL;
-}
-
-static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
- void *args)
-{
- CGS_FUNC_ADEV;
-
- return amdgpu_atom_execute_table(
- adev->mode_info.atom_context, table, args);
-}
-
-struct cgs_irq_params {
- unsigned src_id;
- cgs_irq_source_set_func_t set;
- cgs_irq_handler_func_t handler;
- void *private_data;
-};
-
-static int cgs_set_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- struct cgs_irq_params *irq_params =
- (struct cgs_irq_params *)src->data;
- if (!irq_params)
- return -EINVAL;
- if (!irq_params->set)
- return -EINVAL;
- return irq_params->set(irq_params->private_data,
- irq_params->src_id,
- type,
- (int)state);
-}
-
-static int cgs_process_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- struct cgs_irq_params *irq_params =
- (struct cgs_irq_params *)source->data;
- if (!irq_params)
- return -EINVAL;
- if (!irq_params->handler)
- return -EINVAL;
- return irq_params->handler(irq_params->private_data,
- irq_params->src_id,
- entry->iv_entry);
-}
-
-static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
- .set = cgs_set_irq_state,
- .process = cgs_process_irq,
-};
-
-static int amdgpu_cgs_add_irq_source(void *cgs_device,
- unsigned client_id,
- unsigned src_id,
- unsigned num_types,
- cgs_irq_source_set_func_t set,
- cgs_irq_handler_func_t handler,
- void *private_data)
-{
- CGS_FUNC_ADEV;
- int ret = 0;
- struct cgs_irq_params *irq_params;
- struct amdgpu_irq_src *source =
- kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
- if (!source)
- return -ENOMEM;
- irq_params =
- kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
- if (!irq_params) {
- kfree(source);
- return -ENOMEM;
+ BUG();
}
- source->num_types = num_types;
- source->funcs = &cgs_irq_funcs;
- irq_params->src_id = src_id;
- irq_params->set = set;
- irq_params->handler = handler;
- irq_params->private_data = private_data;
- source->data = (void *)irq_params;
- ret = amdgpu_irq_add_id(adev, client_id, src_id, source);
- if (ret) {
- kfree(irq_params);
- kfree(source);
- }
-
- return ret;
-}
-
-static int amdgpu_cgs_irq_get(void *cgs_device, unsigned client_id,
- unsigned src_id, unsigned type)
-{
- CGS_FUNC_ADEV;
-
- if (!adev->irq.client[client_id].sources)
- return -EINVAL;
-
- return amdgpu_irq_get(adev, adev->irq.client[client_id].sources[src_id], type);
-}
-
-static int amdgpu_cgs_irq_put(void *cgs_device, unsigned client_id,
- unsigned src_id, unsigned type)
-{
- CGS_FUNC_ADEV;
-
- if (!adev->irq.client[client_id].sources)
- return -EINVAL;
-
- return amdgpu_irq_put(adev, adev->irq.client[client_id].sources[src_id], type);
-}
-
-static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
- enum amd_ip_block_type block_type,
- enum amd_clockgating_state state)
-{
- CGS_FUNC_ADEV;
- int i, r = -1;
-
- for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
- continue;
-
- if (adev->ip_blocks[i].version->type == block_type) {
- r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
- (void *)adev,
- state);
- break;
- }
- }
- return r;
-}
-
-static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
- enum amd_ip_block_type block_type,
- enum amd_powergating_state state)
-{
- CGS_FUNC_ADEV;
- int i, r = -1;
-
- for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
- continue;
-
- if (adev->ip_blocks[i].version->type == block_type) {
- r = adev->ip_blocks[i].version->funcs->set_powergating_state(
- (void *)adev,
- state);
- break;
- }
- }
- return r;
+ WARN(1, "Invalid indirect register space");
}
-
static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
CGS_FUNC_ADEV;
@@ -538,18 +156,6 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
return result;
}
-static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
-{
- CGS_FUNC_ADEV;
- if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- return 0;
- }
- /* cannot release other firmware because they are not created by cgs */
- return -EINVAL;
-}
-
static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
enum cgs_ucode_id type)
{
@@ -557,66 +163,49 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
uint16_t fw_version = 0;
switch (type) {
- case CGS_UCODE_ID_SDMA0:
- fw_version = adev->sdma.instance[0].fw_version;
- break;
- case CGS_UCODE_ID_SDMA1:
- fw_version = adev->sdma.instance[1].fw_version;
- break;
- case CGS_UCODE_ID_CP_CE:
- fw_version = adev->gfx.ce_fw_version;
- break;
- case CGS_UCODE_ID_CP_PFP:
- fw_version = adev->gfx.pfp_fw_version;
- break;
- case CGS_UCODE_ID_CP_ME:
- fw_version = adev->gfx.me_fw_version;
- break;
- case CGS_UCODE_ID_CP_MEC:
- fw_version = adev->gfx.mec_fw_version;
- break;
- case CGS_UCODE_ID_CP_MEC_JT1:
- fw_version = adev->gfx.mec_fw_version;
- break;
- case CGS_UCODE_ID_CP_MEC_JT2:
- fw_version = adev->gfx.mec_fw_version;
- break;
- case CGS_UCODE_ID_RLC_G:
- fw_version = adev->gfx.rlc_fw_version;
- break;
- case CGS_UCODE_ID_STORAGE:
- break;
- default:
- DRM_ERROR("firmware type %d do not have version\n", type);
- break;
+ case CGS_UCODE_ID_SDMA0:
+ fw_version = adev->sdma.instance[0].fw_version;
+ break;
+ case CGS_UCODE_ID_SDMA1:
+ fw_version = adev->sdma.instance[1].fw_version;
+ break;
+ case CGS_UCODE_ID_CP_CE:
+ fw_version = adev->gfx.ce_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_PFP:
+ fw_version = adev->gfx.pfp_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_ME:
+ fw_version = adev->gfx.me_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_MEC:
+ fw_version = adev->gfx.mec_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_MEC_JT1:
+ fw_version = adev->gfx.mec_fw_version;
+ break;
+ case CGS_UCODE_ID_CP_MEC_JT2:
+ fw_version = adev->gfx.mec_fw_version;
+ break;
+ case CGS_UCODE_ID_RLC_G:
+ fw_version = adev->gfx.rlc_fw_version;
+ break;
+ case CGS_UCODE_ID_STORAGE:
+ break;
+ default:
+ DRM_ERROR("firmware type %d do not have version\n", type);
+ break;
}
return fw_version;
}
-static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
- bool en)
-{
- CGS_FUNC_ADEV;
-
- if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
- adev->gfx.rlc.funcs->exit_safe_mode == NULL)
- return 0;
-
- if (en)
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
- else
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
-
- return 0;
-}
-
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
enum cgs_ucode_id type,
struct cgs_firmware_info *info)
{
CGS_FUNC_ADEV;
- if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
+ if (type != CGS_UCODE_ID_SMU && type != CGS_UCODE_ID_SMU_SK) {
uint64_t gpu_addr;
uint32_t data_size;
const struct gfx_firmware_header_v1_0 *header;
@@ -624,6 +213,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
struct amdgpu_firmware_info *ucode;
id = fw_type_convert(cgs_device, type);
+ if (id >= AMDGPU_UCODE_ID_MAXIMUM)
+ return -EINVAL;
+
ucode = &adev->firmware.ucode[id];
if (ucode->fw == NULL)
return -EINVAL;
@@ -643,8 +235,8 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
info->mc_addr = gpu_addr;
info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
- if (CGS_UCODE_ID_CP_MEC == type)
- info->image_size = (header->jt_offset) << 2;
+ if (type == CGS_UCODE_ID_CP_MEC)
+ info->image_size = le32_to_cpu(header->jt_offset) << 2;
info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
@@ -660,83 +252,113 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ if ((adev->pdev->revision == 0x80) ||
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x665f)) {
+ info->is_kicker = true;
+ strscpy(fw_name, "amdgpu/bonaire_k_smc.bin");
+ } else {
+ strscpy(fw_name, "amdgpu/bonaire_smc.bin");
+ }
+ break;
+ case CHIP_HAWAII:
+ if (adev->pdev->revision == 0x80) {
+ info->is_kicker = true;
+ strscpy(fw_name, "amdgpu/hawaii_k_smc.bin");
+ } else {
+ strscpy(fw_name, "amdgpu/hawaii_smc.bin");
+ }
+ break;
case CHIP_TOPAZ:
if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
- ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
+ ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)) ||
+ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD1)) ||
+ ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0xD3))) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+ strscpy(fw_name, "amdgpu/topaz_k_smc.bin");
} else
- strcpy(fw_name, "amdgpu/topaz_smc.bin");
+ strscpy(fw_name, "amdgpu/topaz_smc.bin");
break;
case CHIP_TONGA:
if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
+ strscpy(fw_name, "amdgpu/tonga_k_smc.bin");
} else
- strcpy(fw_name, "amdgpu/tonga_smc.bin");
+ strscpy(fw_name, "amdgpu/tonga_smc.bin");
break;
case CHIP_FIJI:
- strcpy(fw_name, "amdgpu/fiji_smc.bin");
+ strscpy(fw_name, "amdgpu/fiji_smc.bin");
break;
case CHIP_POLARIS11:
if (type == CGS_UCODE_ID_SMU) {
- if (((adev->pdev->device == 0x67ef) &&
- ((adev->pdev->revision == 0xe0) ||
- (adev->pdev->revision == 0xe2) ||
- (adev->pdev->revision == 0xe5))) ||
- ((adev->pdev->device == 0x67ff) &&
- ((adev->pdev->revision == 0xcf) ||
- (adev->pdev->revision == 0xef) ||
- (adev->pdev->revision == 0xff)))) {
+ if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
- } else
- strcpy(fw_name, "amdgpu/polaris11_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris11_k_smc.bin");
+ } else if (ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
+ info->is_kicker = true;
+ strscpy(fw_name, "amdgpu/polaris11_k2_smc.bin");
+ } else {
+ strscpy(fw_name, "amdgpu/polaris11_smc.bin");
+ }
} else if (type == CGS_UCODE_ID_SMU_SK) {
- strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
+ strscpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
}
break;
case CHIP_POLARIS10:
if (type == CGS_UCODE_ID_SMU) {
- if ((adev->pdev->device == 0x67df) &&
- ((adev->pdev->revision == 0xe0) ||
- (adev->pdev->revision == 0xe3) ||
- (adev->pdev->revision == 0xe4) ||
- (adev->pdev->revision == 0xe5) ||
- (adev->pdev->revision == 0xe7) ||
- (adev->pdev->revision == 0xef))) {
+ if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision)) {
+ info->is_kicker = true;
+ strscpy(fw_name, "amdgpu/polaris10_k_smc.bin");
+ } else if (ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) {
info->is_kicker = true;
- strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
- } else
- strcpy(fw_name, "amdgpu/polaris10_smc.bin");
+ strscpy(fw_name, "amdgpu/polaris10_k2_smc.bin");
+ } else {
+ strscpy(fw_name, "amdgpu/polaris10_smc.bin");
+ }
} else if (type == CGS_UCODE_ID_SMU_SK) {
- strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
+ strscpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
}
break;
case CHIP_POLARIS12:
- strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ if (ASICID_IS_P23(adev->pdev->device, adev->pdev->revision)) {
+ info->is_kicker = true;
+ strscpy(fw_name, "amdgpu/polaris12_k_smc.bin");
+ } else {
+ strscpy(fw_name, "amdgpu/polaris12_smc.bin");
+ }
+ break;
+ case CHIP_VEGAM:
+ strscpy(fw_name, "amdgpu/vegam_smc.bin");
break;
case CHIP_VEGA10:
- strcpy(fw_name, "amdgpu/vega10_smc.bin");
+ if ((adev->pdev->device == 0x687f) &&
+ ((adev->pdev->revision == 0xc0) ||
+ (adev->pdev->revision == 0xc1) ||
+ (adev->pdev->revision == 0xc3)))
+ strscpy(fw_name, "amdgpu/vega10_acg_smc.bin");
+ else
+ strscpy(fw_name, "amdgpu/vega10_smc.bin");
+ break;
+ case CHIP_VEGA12:
+ strscpy(fw_name, "amdgpu/vega12_smc.bin");
+ break;
+ case CHIP_VEGA20:
+ strscpy(fw_name, "amdgpu/vega20_smc.bin");
break;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
}
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err) {
- DRM_ERROR("Failed to request firmware\n");
- return err;
- }
-
- err = amdgpu_ucode_validate(adev->pm.fw);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw,
+ AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name);
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
return err;
}
@@ -766,357 +388,12 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return 0;
}
-static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
-{
- CGS_FUNC_ADEV;
- return amdgpu_sriov_vf(adev);
-}
-
-static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
- struct cgs_system_info *sys_info)
-{
- CGS_FUNC_ADEV;
-
- if (NULL == sys_info)
- return -ENODEV;
-
- if (sizeof(struct cgs_system_info) != sys_info->size)
- return -ENODEV;
-
- switch (sys_info->info_id) {
- case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
- sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
- break;
- case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
- sys_info->value = adev->pm.pcie_gen_mask;
- break;
- case CGS_SYSTEM_INFO_PCIE_MLW:
- sys_info->value = adev->pm.pcie_mlw_mask;
- break;
- case CGS_SYSTEM_INFO_PCIE_DEV:
- sys_info->value = adev->pdev->device;
- break;
- case CGS_SYSTEM_INFO_PCIE_REV:
- sys_info->value = adev->pdev->revision;
- break;
- case CGS_SYSTEM_INFO_CG_FLAGS:
- sys_info->value = adev->cg_flags;
- break;
- case CGS_SYSTEM_INFO_PG_FLAGS:
- sys_info->value = adev->pg_flags;
- break;
- case CGS_SYSTEM_INFO_GFX_CU_INFO:
- sys_info->value = adev->gfx.cu_info.number;
- break;
- case CGS_SYSTEM_INFO_GFX_SE_INFO:
- sys_info->value = adev->gfx.config.max_shader_engines;
- break;
- case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
- sys_info->value = adev->pdev->subsystem_device;
- break;
- case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
- sys_info->value = adev->pdev->subsystem_vendor;
- break;
- default:
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
- struct cgs_display_info *info)
-{
- CGS_FUNC_ADEV;
- struct amdgpu_crtc *amdgpu_crtc;
- struct drm_device *ddev = adev->ddev;
- struct drm_crtc *crtc;
- uint32_t line_time_us, vblank_lines;
- struct cgs_mode_info *mode_info;
-
- if (info == NULL)
- return -EINVAL;
-
- mode_info = info->mode_info;
-
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled) {
- info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
- info->display_count++;
- }
- if (mode_info != NULL &&
- crtc->enabled && amdgpu_crtc->enabled &&
- amdgpu_crtc->hw_mode.clock) {
- line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
- amdgpu_crtc->hw_mode.clock;
- vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
- amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2);
- mode_info->vblank_time_us = vblank_lines * line_time_us;
- mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- mode_info->ref_clock = adev->clock.spll.reference_freq;
- mode_info = NULL;
- }
- }
- }
-
- return 0;
-}
-
-
-static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
-{
- CGS_FUNC_ADEV;
-
- adev->pm.dpm_enabled = enabled;
-
- return 0;
-}
-
-/** \brief evaluate acpi namespace object, handle or pathname must be valid
- * \param cgs_device
- * \param info input/output arguments for the control method
- * \return status
- */
-
-#if defined(CONFIG_ACPI)
-static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
- struct cgs_acpi_method_info *info)
-{
- CGS_FUNC_ADEV;
- acpi_handle handle;
- struct acpi_object_list input;
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
- union acpi_object *params, *obj;
- uint8_t name[5] = {'\0'};
- struct cgs_acpi_method_argument *argument;
- uint32_t i, count;
- acpi_status status;
- int result;
-
- handle = ACPI_HANDLE(&adev->pdev->dev);
- if (!handle)
- return -ENODEV;
-
- memset(&input, 0, sizeof(struct acpi_object_list));
-
- /* validate input info */
- if (info->size != sizeof(struct cgs_acpi_method_info))
- return -EINVAL;
-
- input.count = info->input_count;
- if (info->input_count > 0) {
- if (info->pinput_argument == NULL)
- return -EINVAL;
- argument = info->pinput_argument;
- for (i = 0; i < info->input_count; i++) {
- if (((argument->type == ACPI_TYPE_STRING) ||
- (argument->type == ACPI_TYPE_BUFFER)) &&
- (argument->pointer == NULL))
- return -EINVAL;
- argument++;
- }
- }
-
- if (info->output_count > 0) {
- if (info->poutput_argument == NULL)
- return -EINVAL;
- argument = info->poutput_argument;
- for (i = 0; i < info->output_count; i++) {
- if (((argument->type == ACPI_TYPE_STRING) ||
- (argument->type == ACPI_TYPE_BUFFER))
- && (argument->pointer == NULL))
- return -EINVAL;
- argument++;
- }
- }
-
- /* The path name passed to acpi_evaluate_object should be null terminated */
- if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
- strncpy(name, (char *)&(info->name), sizeof(uint32_t));
- name[4] = '\0';
- }
-
- /* parse input parameters */
- if (input.count > 0) {
- input.pointer = params =
- kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
- if (params == NULL)
- return -EINVAL;
-
- argument = info->pinput_argument;
-
- for (i = 0; i < input.count; i++) {
- params->type = argument->type;
- switch (params->type) {
- case ACPI_TYPE_INTEGER:
- params->integer.value = argument->value;
- break;
- case ACPI_TYPE_STRING:
- params->string.length = argument->data_length;
- params->string.pointer = argument->pointer;
- break;
- case ACPI_TYPE_BUFFER:
- params->buffer.length = argument->data_length;
- params->buffer.pointer = argument->pointer;
- break;
- default:
- break;
- }
- params++;
- argument++;
- }
- }
-
- /* parse output info */
- count = info->output_count;
- argument = info->poutput_argument;
-
- /* evaluate the acpi method */
- status = acpi_evaluate_object(handle, name, &input, &output);
-
- if (ACPI_FAILURE(status)) {
- result = -EIO;
- goto free_input;
- }
-
- /* return the output info */
- obj = output.pointer;
-
- if (count > 1) {
- if ((obj->type != ACPI_TYPE_PACKAGE) ||
- (obj->package.count != count)) {
- result = -EIO;
- goto free_obj;
- }
- params = obj->package.elements;
- } else
- params = obj;
-
- if (params == NULL) {
- result = -EIO;
- goto free_obj;
- }
-
- for (i = 0; i < count; i++) {
- if (argument->type != params->type) {
- result = -EIO;
- goto free_obj;
- }
- switch (params->type) {
- case ACPI_TYPE_INTEGER:
- argument->value = params->integer.value;
- break;
- case ACPI_TYPE_STRING:
- if ((params->string.length != argument->data_length) ||
- (params->string.pointer == NULL)) {
- result = -EIO;
- goto free_obj;
- }
- strncpy(argument->pointer,
- params->string.pointer,
- params->string.length);
- break;
- case ACPI_TYPE_BUFFER:
- if (params->buffer.pointer == NULL) {
- result = -EIO;
- goto free_obj;
- }
- memcpy(argument->pointer,
- params->buffer.pointer,
- argument->data_length);
- break;
- default:
- break;
- }
- argument++;
- params++;
- }
-
- result = 0;
-free_obj:
- kfree(obj);
-free_input:
- kfree((void *)input.pointer);
- return result;
-}
-#else
-static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
- struct cgs_acpi_method_info *info)
-{
- return -EIO;
-}
-#endif
-
-static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
- uint32_t acpi_method,
- uint32_t acpi_function,
- void *pinput, void *poutput,
- uint32_t output_count,
- uint32_t input_size,
- uint32_t output_size)
-{
- struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
- struct cgs_acpi_method_argument acpi_output = {0};
- struct cgs_acpi_method_info info = {0};
-
- acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
- acpi_input[0].data_length = sizeof(uint32_t);
- acpi_input[0].value = acpi_function;
-
- acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
- acpi_input[1].data_length = input_size;
- acpi_input[1].pointer = pinput;
-
- acpi_output.type = CGS_ACPI_TYPE_BUFFER;
- acpi_output.data_length = output_size;
- acpi_output.pointer = poutput;
-
- info.size = sizeof(struct cgs_acpi_method_info);
- info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
- info.input_count = 2;
- info.name = acpi_method;
- info.pinput_argument = acpi_input;
- info.output_count = output_count;
- info.poutput_argument = &acpi_output;
-
- return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
-}
-
static const struct cgs_ops amdgpu_cgs_ops = {
- .alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
- .free_gpu_mem = amdgpu_cgs_free_gpu_mem,
- .gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
- .gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem,
- .kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem,
- .kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem,
.read_register = amdgpu_cgs_read_register,
.write_register = amdgpu_cgs_write_register,
.read_ind_register = amdgpu_cgs_read_ind_register,
.write_ind_register = amdgpu_cgs_write_ind_register,
- .get_pci_resource = amdgpu_cgs_get_pci_resource,
- .atom_get_data_table = amdgpu_cgs_atom_get_data_table,
- .atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
- .atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
.get_firmware_info = amdgpu_cgs_get_firmware_info,
- .rel_firmware = amdgpu_cgs_rel_firmware,
- .set_powergating_state = amdgpu_cgs_set_powergating_state,
- .set_clockgating_state = amdgpu_cgs_set_clockgating_state,
- .get_active_displays_info = amdgpu_cgs_get_active_displays_info,
- .notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
- .call_acpi_method = amdgpu_cgs_call_acpi_method,
- .query_system_info = amdgpu_cgs_query_system_info,
- .is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
- .enter_safe_mode = amdgpu_cgs_enter_safe_mode,
-};
-
-static const struct cgs_os_ops amdgpu_cgs_os_ops = {
- .add_irq_source = amdgpu_cgs_add_irq_source,
- .irq_get = amdgpu_cgs_irq_get,
- .irq_put = amdgpu_cgs_irq_put
};
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
@@ -1130,7 +407,6 @@ struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
}
cgs_device->base.ops = &amdgpu_cgs_ops;
- cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
cgs_device->adev = adev;
return (struct cgs_device *)cgs_device;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 8d1cf2d3e663..47e9bfba0642 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -23,10 +23,12 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
-#include <drm/drm_edid.h>
+
+#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
@@ -34,13 +36,14 @@
#include "atombios_dp.h"
#include "amdgpu_connectors.h"
#include "amdgpu_i2c.h"
+#include "amdgpu_display.h"
#include <linux/pm_runtime.h>
void amdgpu_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
/* bail if the connector does not have hpd pin, e.g.,
@@ -69,25 +72,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
/* don't do anything if sink is not display port, i.e.,
* passive dp->(dvi|hdmi) adaptor
*/
- if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
- int saved_dpms = connector->dpms;
- /* Only turn off the display if it's physically disconnected */
- if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
- /* Don't try to start link training before we
- * have the dpcd */
- if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
- return;
-
- /* set it to OFF so that drm_helper_connector_dpms()
- * won't return immediately since the current state
- * is ON at this point.
- */
- connector->dpms = DRM_MODE_DPMS_OFF;
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
- connector->dpms = saved_dpms;
+ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
+ amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
+ amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+ /* Don't start link training before we have the DPCD */
+ if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+ return;
+
+ /* Turn the connector off and back on immediately, which
+ * will trigger link training
+ */
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
}
}
@@ -107,13 +103,13 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
int bpc = 8;
- unsigned mode_clock, max_tmds_clock;
+ unsigned int mode_clock, max_tmds_clock;
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB:
if (amdgpu_connector->use_digital) {
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -121,7 +117,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -130,7 +126,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
dig_connector = amdgpu_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
- drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ connector->display_info.is_hdmi) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
@@ -154,7 +150,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
break;
}
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
/*
* Pre DCE-8 hw can't handle > 12 bpc, and more than 12 bpc doesn't make
* much sense without support for > 12 bpc framebuffers. RGB 4:4:4 at
@@ -180,7 +176,7 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
/* Check if bpc is within clock limit. Try to degrade gracefully otherwise */
if ((bpc == 12) && (mode_clock * 3/2 > max_tmds_clock)) {
- if ((connector->display_info.edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30) &&
+ if ((connector->display_info.edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30) &&
(mode_clock * 5/4 <= max_tmds_clock))
bpc = 10;
else
@@ -219,30 +215,20 @@ static void
amdgpu_connector_update_scratch_regs(struct drm_connector *connector,
enum drm_connector_status status)
{
- struct drm_encoder *best_encoder = NULL;
- struct drm_encoder *encoder = NULL;
+ struct drm_encoder *best_encoder;
+ struct drm_encoder *encoder;
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
bool connected;
- int i;
best_encoder = connector_funcs->best_encoder(connector);
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if ((encoder == best_encoder) && (status == connector_status_connected))
connected = true;
else
connected = false;
amdgpu_atombios_encoder_set_bios_scratch_regs(connector, encoder, connected);
-
}
}
@@ -251,58 +237,25 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
int encoder_type)
{
struct drm_encoder *encoder;
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
- encoder = drm_encoder_find(connector->dev,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type == encoder_type)
return encoder;
}
- return NULL;
-}
-
-struct edid *amdgpu_connector_edid(struct drm_connector *connector)
-{
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- struct drm_property_blob *edid_blob = connector->edid_blob_ptr;
- if (amdgpu_connector->edid) {
- return amdgpu_connector->edid;
- } else if (edid_blob) {
- struct edid *edid = kmemdup(edid_blob->data, edid_blob->length, GFP_KERNEL);
- if (edid)
- amdgpu_connector->edid = edid;
- }
- return amdgpu_connector->edid;
+ return NULL;
}
static struct edid *
amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev)
{
- struct edid *edid;
-
- if (adev->mode_info.bios_hardcoded_edid) {
- edid = kmalloc(adev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
- if (edid) {
- memcpy((unsigned char *)edid,
- (unsigned char *)adev->mode_info.bios_hardcoded_edid,
- adev->mode_info.bios_hardcoded_edid_size);
- return edid;
- }
- }
- return NULL;
+ return drm_edid_duplicate(drm_edid_raw(adev->mode_info.bios_hardcoded_edid));
}
static void amdgpu_connector_get_edid(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->edid)
@@ -337,8 +290,10 @@ static void amdgpu_connector_get_edid(struct drm_connector *connector)
if (!amdgpu_connector->edid) {
/* some laptops provide a hardcoded edid in rom for LCDs */
if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
- (connector->connector_type == DRM_MODE_CONNECTOR_eDP)))
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP))) {
amdgpu_connector->edid = amdgpu_connector_get_hardcoded_edid(adev);
+ drm_connector_update_edid_property(connector, amdgpu_connector->edid);
+ }
}
}
@@ -346,10 +301,8 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_connector->edid) {
- kfree(amdgpu_connector->edid);
- amdgpu_connector->edid = NULL;
- }
+ kfree(amdgpu_connector->edid);
+ amdgpu_connector->edid = NULL;
}
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
@@ -358,23 +311,23 @@ static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
int ret;
if (amdgpu_connector->edid) {
- drm_mode_connector_update_edid_property(connector, amdgpu_connector->edid);
+ drm_connector_update_edid_property(connector, amdgpu_connector->edid);
ret = drm_add_edid_modes(connector, amdgpu_connector->edid);
- drm_edid_to_eld(connector, amdgpu_connector->edid);
return ret;
}
- drm_mode_connector_update_edid_property(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
return 0;
}
static struct drm_encoder *
amdgpu_connector_best_single_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
+ struct drm_encoder *encoder;
+
+ /* pick the first one */
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
return NULL;
}
@@ -411,6 +364,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder)
native_mode->vdisplay != 0 &&
native_mode->clock != 0) {
mode = drm_mode_duplicate(dev, native_mode);
+ if (!mode)
+ return NULL;
+
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
@@ -425,6 +381,9 @@ amdgpu_connector_lcd_native_mode(struct drm_encoder *encoder)
* simpler.
*/
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
+ if (!mode)
+ return NULL;
+
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
}
@@ -439,30 +398,28 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
struct drm_display_mode *mode = NULL;
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
int i;
- static const struct mode_size {
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
int w;
int h;
- } common_modes[17] = {
- { 640, 480},
- { 720, 480},
- { 800, 600},
- { 848, 480},
- {1024, 768},
- {1152, 768},
- {1280, 720},
- {1280, 800},
- {1280, 854},
- {1280, 960},
- {1280, 1024},
- {1440, 900},
- {1400, 1050},
- {1680, 1050},
- {1600, 1200},
- {1920, 1080},
- {1920, 1200}
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
};
- for (i = 0; i < 17; i++) {
+ n = ARRAY_SIZE(common_modes);
+
+ for (i = 0; i < n; i++) {
if (amdgpu_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) {
if (common_modes[i].w > 1024 ||
common_modes[i].h > 768)
@@ -475,10 +432,12 @@ static void amdgpu_connector_add_common_modes(struct drm_encoder *encoder,
common_modes[i].h == native_mode->vdisplay))
continue;
}
- if (common_modes[i].w < 320 || common_modes[i].h < 200)
- continue;
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+ if (!mode)
+ return;
+ strscpy(mode->name, common_modes[i].name, DRM_DISPLAY_MODE_LEN);
+
drm_mode_probed_add(connector, mode);
}
}
@@ -488,7 +447,7 @@ static int amdgpu_connector_set_property(struct drm_connector *connector,
uint64_t val)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
@@ -603,16 +562,26 @@ static int amdgpu_connector_set_property(struct drm_connector *connector,
amdgpu_encoder = to_amdgpu_encoder(connector->encoder);
} else {
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+
amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector));
}
switch (val) {
default:
- case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
- case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
- case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
- case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+ case DRM_MODE_SCALE_NONE:
+ rmx_type = RMX_OFF;
+ break;
+ case DRM_MODE_SCALE_CENTER:
+ rmx_type = RMX_CENTER;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ rmx_type = RMX_ASPECT;
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ rmx_type = RMX_FULL;
+ break;
}
+
if (amdgpu_encoder->rmx_type == rmx_type)
return 0;
@@ -641,7 +610,7 @@ amdgpu_connector_fixup_lcd_native_mode(struct drm_encoder *encoder,
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
if (mode->hdisplay != native_mode->hdisplay ||
mode->vdisplay != native_mode->vdisplay)
- memcpy(native_mode, mode, sizeof(*mode));
+ drm_mode_copy(native_mode, mode);
}
}
@@ -650,7 +619,7 @@ amdgpu_connector_fixup_lcd_native_mode(struct drm_encoder *encoder,
list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
if (mode->hdisplay == native_mode->hdisplay &&
mode->vdisplay == native_mode->vdisplay) {
- *native_mode = *mode;
+ drm_mode_copy(native_mode, mode);
drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
break;
@@ -701,8 +670,8 @@ static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector)
return ret;
}
-static int amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status amdgpu_connector_lvds_mode_valid(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
{
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
@@ -739,9 +708,13 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ return connector_status_disconnected;
+ }
+ }
if (encoder) {
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -760,8 +733,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
/* check acpi lid status ??? */
amdgpu_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -802,16 +779,26 @@ static int amdgpu_connector_set_lcd_property(struct drm_connector *connector,
amdgpu_encoder = to_amdgpu_encoder(connector->encoder);
else {
const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private;
+
amdgpu_encoder = to_amdgpu_encoder(connector_funcs->best_encoder(connector));
}
switch (value) {
- case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break;
- case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break;
- case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break;
+ case DRM_MODE_SCALE_NONE:
+ rmx_type = RMX_OFF;
+ break;
+ case DRM_MODE_SCALE_CENTER:
+ rmx_type = RMX_CENTER;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ rmx_type = RMX_ASPECT;
+ break;
default:
- case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ rmx_type = RMX_FULL;
+ break;
}
+
if (amdgpu_encoder->rmx_type == rmx_type)
return 0;
@@ -843,15 +830,16 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector)
amdgpu_connector_get_edid(connector);
ret = amdgpu_connector_ddc_get_modes(connector);
+ amdgpu_get_native_mode(connector);
return ret;
}
-static int amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* XXX check mode bandwidth */
@@ -871,16 +859,20 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ return connector_status_disconnected;
+ }
+ }
encoder = amdgpu_connector_best_single_encoder(connector);
if (!encoder)
ret = connector_status_disconnected;
if (amdgpu_connector->ddc_bus)
- dret = amdgpu_ddc_probe(amdgpu_connector, false);
+ dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
if (dret) {
amdgpu_connector->detected_by_load = false;
amdgpu_connector_free_edid(connector);
@@ -927,8 +919,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -952,7 +946,7 @@ static bool
amdgpu_connector_check_hpd_status_unchanged(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
enum drm_connector_status status;
@@ -968,6 +962,41 @@ amdgpu_connector_check_hpd_status_unchanged(struct drm_connector *connector)
return false;
}
+static void amdgpu_connector_shared_ddc(enum drm_connector_status *status,
+ struct drm_connector *connector,
+ struct amdgpu_connector *amdgpu_connector)
+{
+ struct drm_connector *list_connector;
+ struct drm_connector_list_iter iter;
+ struct amdgpu_connector *list_amdgpu_connector;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ if (amdgpu_connector->shared_ddc && *status == connector_status_connected) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(list_connector,
+ &iter) {
+ if (connector == list_connector)
+ continue;
+ list_amdgpu_connector = to_amdgpu_connector(list_connector);
+ if (list_amdgpu_connector->shared_ddc &&
+ list_amdgpu_connector->ddc_bus->rec.i2c_id ==
+ amdgpu_connector->ddc_bus->rec.i2c_id) {
+ /* cases where both connectors are digital */
+ if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
+ /* hpd is our only option in this case */
+ if (!amdgpu_display_hpd_sense(adev,
+ amdgpu_connector->hpd.hpd)) {
+ amdgpu_connector_free_edid(connector);
+ *status = connector_status_disconnected;
+ }
+ }
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ }
+}
+
/*
* DVI is complicated
* Do a DDC probe, if DDC probe passes, get the full EDID so
@@ -983,25 +1012,48 @@ static enum drm_connector_status
amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- struct drm_encoder *encoder = NULL;
const struct drm_encoder_helper_funcs *encoder_funcs;
- int i, r;
+ int r;
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ return connector_status_disconnected;
+ }
+ }
+
+ if (amdgpu_connector->detected_hpd_without_ddc) {
+ force = true;
+ amdgpu_connector->detected_hpd_without_ddc = false;
+ }
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
goto exit;
}
- if (amdgpu_connector->ddc_bus)
- dret = amdgpu_ddc_probe(amdgpu_connector, false);
+ if (amdgpu_connector->ddc_bus) {
+ dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
+
+ /* Sometimes the pins required for the DDC probe on DVI
+ * connectors don't make contact at the same time that the ones
+ * for HPD do. If the DDC probe fails even though we had an HPD
+ * signal, try again later
+ */
+ if (!dret && !force &&
+ amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
+ DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n");
+ amdgpu_connector->detected_hpd_without_ddc = true;
+ schedule_delayed_work(&adev->hotplug_work,
+ msecs_to_jiffies(1000));
+ goto exit;
+ }
+ }
if (dret) {
amdgpu_connector->detected_by_load = false;
amdgpu_connector_free_edid(connector);
@@ -1031,27 +1083,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
* DDC line. The latter is more complex because with DVI<->HDMI adapters
* you don't really know what's connected to which port as both are digital.
*/
- if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) {
- struct drm_connector *list_connector;
- struct amdgpu_connector *list_amdgpu_connector;
- list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
- if (connector == list_connector)
- continue;
- list_amdgpu_connector = to_amdgpu_connector(list_connector);
- if (list_amdgpu_connector->shared_ddc &&
- (list_amdgpu_connector->ddc_bus->rec.i2c_id ==
- amdgpu_connector->ddc_bus->rec.i2c_id)) {
- /* cases where both connectors are digital */
- if (list_connector->connector_type != DRM_MODE_CONNECTOR_VGA) {
- /* hpd is our only option in this case */
- if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
- amdgpu_connector_free_edid(connector);
- ret = connector_status_disconnected;
- }
- }
- }
- }
- }
+ amdgpu_connector_shared_ddc(&ret, connector, amdgpu_connector);
}
}
@@ -1075,14 +1107,9 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
/* find analog encoder */
if (amdgpu_connector->dac_load_detect) {
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ struct drm_encoder *encoder;
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
@@ -1104,7 +1131,8 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
/* assume digital unless load detected otherwise */
amdgpu_connector->use_digital = true;
lret = encoder_funcs->detect(encoder, connector);
- DRM_DEBUG_KMS("load_detect %x returned: %x\n",encoder->encoder_type,lret);
+ DRM_DEBUG_KMS("load_detect %x returned: %x\n",
+ encoder->encoder_type, lret);
if (lret == connector_status_connected)
amdgpu_connector->use_digital = false;
}
@@ -1118,8 +1146,10 @@ out:
amdgpu_connector_update_scratch_regs(connector, ret);
exit:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1128,18 +1158,10 @@ exit:
static struct drm_encoder *
amdgpu_connector_dvi_encoder(struct drm_connector *connector)
{
- int enc_id = connector->encoder_ids[0];
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_encoder *encoder;
- int i;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev, connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ drm_connector_for_each_possible_encoder(connector, encoder) {
if (amdgpu_connector->use_digital == true) {
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)
return encoder;
@@ -1154,43 +1176,85 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector)
/* then check use digital */
/* pick the first one */
- if (enc_id)
- return drm_encoder_find(connector->dev, enc_id);
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
return NULL;
}
static void amdgpu_connector_dvi_force(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
if (connector->force == DRM_FORCE_ON)
amdgpu_connector->use_digital = false;
if (connector->force == DRM_FORCE_ON_DIGITAL)
amdgpu_connector->use_digital = true;
}
-static int amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+/**
+ * amdgpu_max_hdmi_pixel_clock - Return max supported HDMI (TMDS) pixel clock
+ * @adev: pointer to amdgpu_device
+ *
+ * Return: maximum supported HDMI (TMDS) pixel clock in KHz.
+ */
+static int amdgpu_max_hdmi_pixel_clock(const struct amdgpu_device *adev)
+{
+ if (adev->asic_type >= CHIP_POLARIS10)
+ return 600000;
+ else if (adev->asic_type >= CHIP_TONGA)
+ return 300000;
+ else
+ return 297000;
+}
+
+/**
+ * amdgpu_connector_dvi_mode_valid - Validate a mode on DVI/HDMI connectors
+ * @connector: DRM connector to validate the mode on
+ * @mode: display mode to validate
+ *
+ * Validate the given display mode on DVI and HDMI connectors, including
+ * analog signals on DVI-I.
+ *
+ * Return: drm_mode_status indicating whether the mode is valid.
+ */
+static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ const int max_hdmi_pixel_clock = amdgpu_max_hdmi_pixel_clock(adev);
+ const int max_dvi_single_link_pixel_clock = 165000;
+ int max_digital_pixel_clock_khz;
/* XXX check mode bandwidth */
- if (amdgpu_connector->use_digital && (mode->clock > 165000)) {
- if ((amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
- (amdgpu_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B)) {
- return MODE_OK;
- } else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
- /* HDMI 1.3+ supports max clock of 340 Mhz */
- if (mode->clock > 340000)
- return MODE_CLOCK_HIGH;
- else
- return MODE_OK;
- } else {
- return MODE_CLOCK_HIGH;
+ if (amdgpu_connector->use_digital) {
+ switch (amdgpu_connector->connector_object_id) {
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_A:
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock;
+ break;
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I:
+ case CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D:
+ case CONNECTOR_OBJECT_ID_HDMI_TYPE_B:
+ max_digital_pixel_clock_khz = max_dvi_single_link_pixel_clock * 2;
+ break;
}
+
+ /* When the display EDID claims that it's an HDMI display,
+ * we use the HDMI encoder mode of the display HW,
+ * so we should verify against the max HDMI clock here.
+ */
+ if (connector->display_info.is_hdmi)
+ max_digital_pixel_clock_khz = max_hdmi_pixel_clock;
+
+ if (mode->clock > max_digital_pixel_clock_khz)
+ return MODE_CLOCK_HIGH;
}
/* check against the max pixel clock */
@@ -1290,17 +1354,8 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn
{
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
- int i;
-
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
-
- encoder = drm_encoder_find(connector->dev,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
+ drm_connector_for_each_possible_encoder(connector, encoder) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
switch (amdgpu_encoder->encoder_id) {
@@ -1319,17 +1374,9 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
{
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
- int i;
bool found = false;
- for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- if (connector->encoder_ids[i] == 0)
- break;
- encoder = drm_encoder_find(connector->dev,
- connector->encoder_ids[i]);
- if (!encoder)
- continue;
-
+ drm_connector_for_each_possible_encoder(connector, encoder) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2)
found = true;
@@ -1341,7 +1388,7 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector)
bool amdgpu_connector_is_dp12_capable(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if ((adev->clock.default_dispclk >= 53900) &&
amdgpu_connector_encoder_is_hbr2(connector)) {
@@ -1355,16 +1402,20 @@ static enum drm_connector_status
amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
enum drm_connector_status ret = connector_status_disconnected;
struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ return connector_status_disconnected;
+ }
+ }
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1404,10 +1455,12 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
/* setup ddc on the bridge */
amdgpu_atombios_encoder_setup_ext_encoder_ddc(encoder);
/* bridge chips are always aux */
- if (amdgpu_ddc_probe(amdgpu_connector, true)) /* try DDC */
+ /* try DDC */
+ if (amdgpu_display_ddc_probe(amdgpu_connector, true))
ret = connector_status_connected;
else if (amdgpu_connector->dac_load_detect) { /* try load detection */
const struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
ret = encoder_funcs->detect(encoder, connector);
}
}
@@ -1424,7 +1477,8 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
ret = connector_status_connected;
} else {
/* try non-aux ddc (DP to DVI/HDMI/etc. adapter) */
- if (amdgpu_ddc_probe(amdgpu_connector, false))
+ if (amdgpu_display_ddc_probe(amdgpu_connector,
+ false))
ret = connector_status_connected;
}
}
@@ -1432,14 +1486,22 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ drm_dp_set_subconnector_property(&amdgpu_connector->base,
+ ret,
+ amdgpu_dig_connector->dpcd,
+ amdgpu_dig_connector->downstream_ports);
return ret;
}
-static int amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
+ const struct drm_display_mode *mode)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *amdgpu_dig_connector = amdgpu_connector->con_priv;
@@ -1477,7 +1539,7 @@ static int amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
(amdgpu_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
return amdgpu_atombios_dp_mode_valid_helper(connector, mode);
} else {
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
/* HDMI 1.3+ supports max clock of 340 MHz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
@@ -1491,6 +1553,20 @@ static int amdgpu_connector_dp_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static int
+amdgpu_connector_late_register(struct drm_connector *connector)
+{
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ int r = 0;
+
+ if (amdgpu_connector->ddc_bus->has_aux) {
+ amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
+ r = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
+ }
+
+ return r;
+}
+
static const struct drm_connector_helper_funcs amdgpu_connector_dp_helper_funcs = {
.get_modes = amdgpu_connector_dp_get_modes,
.mode_valid = amdgpu_connector_dp_mode_valid,
@@ -1505,6 +1581,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
.early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
+ .late_register = amdgpu_connector_late_register,
};
static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
@@ -1515,6 +1592,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
+ .late_register = amdgpu_connector_late_register,
};
void
@@ -1527,12 +1605,14 @@ amdgpu_connector_add(struct amdgpu_device *adev,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
struct amdgpu_connector_atom_dig *amdgpu_dig_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
+ struct i2c_adapter *ddc = NULL;
uint32_t subpixel_order = SubPixelNone;
bool shared_ddc = false;
bool is_dp_bridge = false;
@@ -1542,10 +1622,12 @@ amdgpu_connector_add(struct amdgpu_device *adev,
return;
/* see if we already added it */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->connector_id == connector_id) {
amdgpu_connector->devices |= supported_device;
+ drm_connector_list_iter_end(&iter);
return;
}
if (amdgpu_connector->ddc_bus && i2c_bus->valid) {
@@ -1560,6 +1642,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
}
}
}
+ drm_connector_list_iter_end(&iter);
/* check if it's a dp bridge */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -1602,17 +1685,21 @@ amdgpu_connector_add(struct amdgpu_device *adev,
amdgpu_connector->con_priv = amdgpu_dig_connector;
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
- if (amdgpu_connector->ddc_bus)
+ if (amdgpu_connector->ddc_bus) {
has_aux = true;
- else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
+ } else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
}
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
case DRM_MODE_CONNECTOR_DVIA:
default:
- drm_connector_init(dev, &amdgpu_connector->base,
- &amdgpu_connector_dp_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_dp_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&amdgpu_connector->base,
&amdgpu_connector_dp_helper_funcs);
connector->interlace_allowed = true;
@@ -1630,8 +1717,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
case DRM_MODE_CONNECTOR_DisplayPort:
- drm_connector_init(dev, &amdgpu_connector->base,
- &amdgpu_connector_dp_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_dp_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&amdgpu_connector->base,
&amdgpu_connector_dp_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
@@ -1652,10 +1741,12 @@ amdgpu_connector_add(struct amdgpu_device *adev,
adev->mode_info.dither_property,
AMDGPU_FMT_DITHER_DISABLE);
- if (amdgpu_audio != 0)
+ if (amdgpu_audio != 0) {
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
+ }
subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = true;
@@ -1672,8 +1763,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
break;
case DRM_MODE_CONNECTOR_LVDS:
case DRM_MODE_CONNECTOR_eDP:
- drm_connector_init(dev, &amdgpu_connector->base,
- &amdgpu_connector_edp_funcs, connector_type);
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_edp_funcs,
+ connector_type,
+ ddc);
drm_connector_helper_add(&amdgpu_connector->base,
&amdgpu_connector_dp_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
@@ -1687,13 +1780,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
} else {
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_vga_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("VGA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_vga_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
amdgpu_connector->dac_load_detect = true;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.load_detect_property,
@@ -1707,13 +1805,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_vga_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("DVIA: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_vga_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_vga_helper_funcs);
amdgpu_connector->dac_load_detect = true;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.load_detect_property,
@@ -1732,13 +1835,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dvi_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("DVI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_dvi_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
subpixel_order = SubPixelHorizontalRGB;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.coherent_mode_property,
@@ -1760,6 +1868,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
@@ -1782,13 +1891,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dvi_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("HDMI: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_dvi_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dvi_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.coherent_mode_property,
1);
@@ -1808,6 +1922,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
@@ -1824,15 +1939,20 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_dp_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
- if (amdgpu_connector->ddc_bus)
+ if (amdgpu_connector->ddc_bus) {
has_aux = true;
- else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
+ } else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_dp_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
subpixel_order = SubPixelHorizontalRGB;
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.coherent_mode_property,
@@ -1853,6 +1973,7 @@ amdgpu_connector_add(struct amdgpu_device *adev,
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.audio_property,
AMDGPU_AUDIO_AUTO);
+ amdgpu_connector->audio = AMDGPU_AUDIO_AUTO;
}
drm_object_attach_property(&amdgpu_connector->base.base,
adev->mode_info.dither_property,
@@ -1866,15 +1987,20 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_edp_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
- if (amdgpu_connector->ddc_bus)
+ if (amdgpu_connector->ddc_bus) {
has_aux = true;
- else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
+ } else {
DRM_ERROR("DP: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ }
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_edp_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_dp_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
@@ -1887,13 +2013,18 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (!amdgpu_dig_connector)
goto failed;
amdgpu_connector->con_priv = amdgpu_dig_connector;
- drm_connector_init(dev, &amdgpu_connector->base, &amdgpu_connector_lvds_funcs, connector_type);
- drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_lvds_helper_funcs);
if (i2c_bus->valid) {
amdgpu_connector->ddc_bus = amdgpu_i2c_lookup(adev, i2c_bus);
if (!amdgpu_connector->ddc_bus)
DRM_ERROR("LVDS: Failed to assign ddc bus! Check dmesg for i2c errors.\n");
+ else
+ ddc = &amdgpu_connector->ddc_bus->adapter;
}
+ drm_connector_init_with_ddc(dev, &amdgpu_connector->base,
+ &amdgpu_connector_lvds_funcs,
+ connector_type,
+ ddc);
+ drm_connector_helper_add(&amdgpu_connector->base, &amdgpu_connector_lvds_helper_funcs);
drm_object_attach_property(&amdgpu_connector->base.base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
@@ -1907,17 +2038,21 @@ amdgpu_connector_add(struct amdgpu_device *adev,
if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
if (i2c_bus->valid) {
connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ DRM_CONNECTOR_POLL_DISCONNECT;
}
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
- drm_connector_register(connector);
if (has_aux)
amdgpu_atombios_dp_aux_init(amdgpu_connector);
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
+ drm_connector_attach_dp_subconnector_property(&amdgpu_connector->base);
+ }
+
return;
failed:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.h
index 61fcef15ad72..eff833b6ed31 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.h
@@ -24,7 +24,6 @@
#ifndef __AMDGPU_CONNECTORS_H__
#define __AMDGPU_CONNECTORS_H__
-struct edid *amdgpu_connector_edid(struct drm_connector *connector);
void amdgpu_connector_hotplug(struct drm_connector *connector);
int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector);
u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
new file mode 100644
index 000000000000..ef996493115f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -0,0 +1,591 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/list.h>
+#include "amdgpu.h"
+
+static const guid_t MCE = CPER_NOTIFY_MCE;
+static const guid_t CMC = CPER_NOTIFY_CMC;
+static const guid_t BOOT = BOOT_TYPE;
+
+static const guid_t CRASHDUMP = AMD_CRASHDUMP;
+static const guid_t RUNTIME = AMD_GPU_NONSTANDARD_ERROR;
+
+static void __inc_entry_length(struct cper_hdr *hdr, uint32_t size)
+{
+ hdr->record_length += size;
+}
+
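+/* Fill the CPER timestamp fields from the current wall-clock time */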
+static void amdgpu_cper_get_timestamp(struct cper_timestamp *timestamp)
+{
+ struct tm tm;
+ time64_t now = ktime_get_real_seconds();
+
+ time64_to_tm(now, 0, &tm);
+ timestamp->seconds = tm.tm_sec;
+ timestamp->minutes = tm.tm_min;
+ timestamp->hours = tm.tm_hour;
+ timestamp->flag = 0;
+ timestamp->day = tm.tm_mday;
+ timestamp->month = 1 + tm.tm_mon;
+ timestamp->year = (1900 + tm.tm_year) % 100;
+ timestamp->century = (1900 + tm.tm_year) / 100;
+}
+
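+/*
+ * Fill the common CPER record header: "CPER" signature, revision, severity,
+ * timestamp, a "socket:counter" record id, the PCI vendor/device as the
+ * platform id and a notify type derived from the amdgpu CPER type.
+ */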
+void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ enum amdgpu_cper_type type,
+ enum cper_error_severity sev)
+{
+ char record_id[16];
+
+ hdr->signature[0] = 'C';
+ hdr->signature[1] = 'P';
+ hdr->signature[2] = 'E';
+ hdr->signature[3] = 'R';
+ hdr->revision = CPER_HDR_REV_1;
+ hdr->signature_end = 0xFFFFFFFF;
+ hdr->error_severity = sev;
+
+ hdr->valid_bits.platform_id = 1;
+ hdr->valid_bits.timestamp = 1;
+
+ amdgpu_cper_get_timestamp(&hdr->timestamp);
+
+ snprintf(record_id, 9, "%d:%X",
+ (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0,
+ atomic_inc_return(&adev->cper.unique_id));
+ memcpy(hdr->record_id, record_id, 8);
+
+ snprintf(hdr->platform_id, 16, "0x%04X:0x%04X",
+ adev->pdev->vendor, adev->pdev->device);
+ /* pmfw version should be part of creator_id according to CPER spec */
+ snprintf(hdr->creator_id, 16, "%s", CPER_CREATOR_ID_AMDGPU);
+
+ switch (type) {
+ case AMDGPU_CPER_TYPE_BOOT:
+ hdr->notify_type = BOOT;
+ break;
+ case AMDGPU_CPER_TYPE_FATAL:
+ case AMDGPU_CPER_TYPE_BP_THRESHOLD:
+ hdr->notify_type = MCE;
+ break;
+ case AMDGPU_CPER_TYPE_RUNTIME:
+ if (sev == CPER_SEV_NON_FATAL_CORRECTED)
+ hdr->notify_type = CMC;
+ else
+ hdr->notify_type = MCE;
+ break;
+ default:
+ dev_err(adev->dev, "Unknown CPER Type\n");
+ break;
+ }
+
+ __inc_entry_length(hdr, HDR_LEN);
+}
+
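+/*
+ * Fill one section descriptor at the given offset/length; the bad page
+ * threshold and poison arguments map to the exceed_err_threshold and
+ * latent_err descriptor flags.
+ */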
+static int amdgpu_cper_entry_fill_section_desc(struct amdgpu_device *adev,
+ struct cper_sec_desc *section_desc,
+ bool bp_threshold,
+ bool poison,
+ enum cper_error_severity sev,
+ guid_t sec_type,
+ uint32_t section_length,
+ uint32_t section_offset)
+{
+ section_desc->revision_minor = CPER_SEC_MINOR_REV_1;
+ section_desc->revision_major = CPER_SEC_MAJOR_REV_22;
+ section_desc->sec_offset = section_offset;
+ section_desc->sec_length = section_length;
+ section_desc->valid_bits.fru_text = 1;
+ section_desc->flag_bits.primary = 1;
+ section_desc->severity = sev;
+ section_desc->sec_type = sec_type;
+
+ snprintf(section_desc->fru_text, 20, "OAM%d",
+ (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0);
+
+ if (bp_threshold)
+ section_desc->flag_bits.exceed_err_threshold = 1;
+ if (poison)
+ section_desc->flag_bits.latent_err = 1;
+
+ return 0;
+}
+
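+/* Append a crashdump fatal section (raw register dump) at section index idx */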
+int amdgpu_cper_entry_fill_fatal_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ struct cper_sec_crashdump_reg_data reg_data)
+{
+ struct cper_sec_desc *section_desc;
+ struct cper_sec_crashdump_fatal *section;
+
+ section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
+ section = (struct cper_sec_crashdump_fatal *)((uint8_t *)hdr +
+ FATAL_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ amdgpu_cper_entry_fill_section_desc(adev, section_desc, false, false,
+ CPER_SEV_FATAL, CRASHDUMP, FATAL_SEC_LEN,
+ FATAL_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ section->body.reg_ctx_type = CPER_CTX_TYPE_CRASH;
+ section->body.reg_arr_size = sizeof(reg_data);
+ section->body.data = reg_data;
+
+ __inc_entry_length(hdr, SEC_DESC_LEN + FATAL_SEC_LEN);
+
+ return 0;
+}
+
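+/*
+ * Append a non-standard runtime error section; non-corrected severities are
+ * flagged as poison and at most CPER_ACA_REG_COUNT registers are copied in.
+ */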
+int amdgpu_cper_entry_fill_runtime_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ enum cper_error_severity sev,
+ uint32_t *reg_dump,
+ uint32_t reg_count)
+{
+ struct cper_sec_desc *section_desc;
+ struct cper_sec_nonstd_err *section;
+ bool poison;
+
+ poison = sev != CPER_SEV_NON_FATAL_CORRECTED;
+ section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
+ section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ amdgpu_cper_entry_fill_section_desc(adev, section_desc, false, poison,
+ sev, RUNTIME, NONSTD_SEC_LEN,
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ reg_count = umin(reg_count, CPER_ACA_REG_COUNT);
+
+ section->hdr.valid_bits.err_info_cnt = 1;
+ section->hdr.valid_bits.err_context_cnt = 1;
+
+ section->info.error_type = RUNTIME;
+ section->info.ms_chk_bits.err_type_valid = 1;
+ section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
+ section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);
+
+ memcpy(section->ctx.reg_dump, reg_dump, reg_count * sizeof(uint32_t));
+
+ __inc_entry_length(hdr, SEC_DESC_LEN + NONSTD_SEC_LEN);
+
+ return 0;
+}
+
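+/*
+ * Append a bad page threshold section: the descriptor is flagged as exceeding
+ * the error threshold and the register dump is hardcoded, encoding the socket id.
+ */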
+int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx)
+{
+ struct cper_sec_desc *section_desc;
+ struct cper_sec_nonstd_err *section;
+ uint32_t socket_id;
+
+ section_desc = (struct cper_sec_desc *)((uint8_t *)hdr + SEC_DESC_OFFSET(idx));
+ section = (struct cper_sec_nonstd_err *)((uint8_t *)hdr +
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ amdgpu_cper_entry_fill_section_desc(adev, section_desc, true, false,
+ CPER_SEV_FATAL, RUNTIME, NONSTD_SEC_LEN,
+ NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
+
+ section->hdr.valid_bits.err_info_cnt = 1;
+ section->hdr.valid_bits.err_context_cnt = 1;
+
+ section->info.error_type = RUNTIME;
+ section->info.valid_bits.ms_chk = 1;
+ section->info.ms_chk_bits.err_type_valid = 1;
+ section->info.ms_chk_bits.err_type = 1;
+ section->info.ms_chk_bits.pcc = 1;
+ section->ctx.reg_ctx_type = CPER_CTX_TYPE_CRASH;
+ section->ctx.reg_arr_size = sizeof(section->ctx.reg_dump);
+
+ /* Hardcoded Reg dump for bad page threshold CPER */
+ socket_id = (adev->smuio.funcs && adev->smuio.funcs->get_socket_id) ?
+ adev->smuio.funcs->get_socket_id(adev) :
+ 0;
+ section->ctx.reg_dump[CPER_ACA_REG_CTL_LO] = 0x1;
+ section->ctx.reg_dump[CPER_ACA_REG_CTL_HI] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_STATUS_LO] = 0x137;
+ section->ctx.reg_dump[CPER_ACA_REG_STATUS_HI] = 0xB0000000;
+ section->ctx.reg_dump[CPER_ACA_REG_ADDR_LO] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_ADDR_HI] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_MISC0_LO] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_MISC0_HI] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_CONFIG_LO] = 0x2;
+ section->ctx.reg_dump[CPER_ACA_REG_CONFIG_HI] = 0x1ff;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_LO] = (socket_id / 4) & 0x01;
+ section->ctx.reg_dump[CPER_ACA_REG_IPID_HI] = 0x096 | (((socket_id % 4) & 0x3) << 12);
+ section->ctx.reg_dump[CPER_ACA_REG_SYND_LO] = 0x0;
+ section->ctx.reg_dump[CPER_ACA_REG_SYND_HI] = 0x0;
+
+ __inc_entry_length(hdr, SEC_DESC_LEN + NONSTD_SEC_LEN);
+
+ return 0;
+}
+
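+/*
+ * Allocate a zeroed CPER entry sized for the header, section_count section
+ * descriptors and section_count section bodies of the given type.
+ */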
+struct cper_hdr *amdgpu_cper_alloc_entry(struct amdgpu_device *adev,
+ enum amdgpu_cper_type type,
+ uint16_t section_count)
+{
+ struct cper_hdr *hdr;
+ uint32_t size = 0;
+
+ size += HDR_LEN;
+ size += (SEC_DESC_LEN * section_count);
+
+ switch (type) {
+ case AMDGPU_CPER_TYPE_RUNTIME:
+ case AMDGPU_CPER_TYPE_BP_THRESHOLD:
+ size += (NONSTD_SEC_LEN * section_count);
+ break;
+ case AMDGPU_CPER_TYPE_FATAL:
+ size += (FATAL_SEC_LEN * section_count);
+ break;
+ case AMDGPU_CPER_TYPE_BOOT:
+ size += (BOOT_SEC_LEN * section_count);
+ break;
+ default:
+ dev_err(adev->dev, "Unknown CPER Type!\n");
+ return NULL;
+ }
+
+ hdr = kzalloc(size, GFP_KERNEL);
+ if (!hdr)
+ return NULL;
+
+ /* Save this early */
+ hdr->sec_cnt = section_count;
+
+ return hdr;
+}
+
+int amdgpu_cper_generate_ue_record(struct amdgpu_device *adev,
+ struct aca_bank *bank)
+{
+ struct cper_hdr *fatal = NULL;
+ struct cper_sec_crashdump_reg_data reg_data = { 0 };
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ int ret;
+
+ fatal = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_FATAL, 1);
+ if (!fatal) {
+ dev_err(adev->dev, "fail to alloc cper entry for ue record\n");
+ return -ENOMEM;
+ }
+
+ reg_data.status_lo = lower_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data.status_hi = upper_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data.addr_lo = lower_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data.addr_hi = upper_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data.ipid_lo = lower_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data.ipid_hi = upper_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data.synd_lo = lower_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+ reg_data.synd_hi = upper_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+
+ amdgpu_cper_entry_fill_hdr(adev, fatal, AMDGPU_CPER_TYPE_FATAL, CPER_SEV_FATAL);
+ ret = amdgpu_cper_entry_fill_fatal_section(adev, fatal, 0, reg_data);
+ if (ret)
+ return ret;
+
+ amdgpu_cper_ring_write(ring, fatal, fatal->record_length);
+ kfree(fatal);
+
+ return 0;
+}
+
+int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev)
+{
+ struct cper_hdr *bp_threshold = NULL;
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ int ret;
+
+ bp_threshold = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_BP_THRESHOLD, 1);
+ if (!bp_threshold) {
+ dev_err(adev->dev, "fail to alloc cper entry for bad page threshold record\n");
+ return -ENOMEM;
+ }
+
+ amdgpu_cper_entry_fill_hdr(adev, bp_threshold,
+ AMDGPU_CPER_TYPE_BP_THRESHOLD,
+ CPER_SEV_FATAL);
+ ret = amdgpu_cper_entry_fill_bad_page_threshold_section(adev, bp_threshold, 0);
+ if (ret)
+ return ret;
+
+ amdgpu_cper_ring_write(ring, bp_threshold, bp_threshold->record_length);
+ kfree(bp_threshold);
+
+ return 0;
+}
+
+static enum cper_error_severity amdgpu_aca_err_type_to_cper_sev(struct amdgpu_device *adev,
+ enum aca_error_type aca_err_type)
+{
+ switch (aca_err_type) {
+ case ACA_ERROR_TYPE_UE:
+ return CPER_SEV_FATAL;
+ case ACA_ERROR_TYPE_CE:
+ return CPER_SEV_NON_FATAL_CORRECTED;
+ case ACA_ERROR_TYPE_DEFERRED:
+ return CPER_SEV_NON_FATAL_UNCORRECTED;
+ default:
+ dev_err(adev->dev, "Unknown ACA error type!\n");
+ return CPER_SEV_FATAL;
+ }
+}
+
+int amdgpu_cper_generate_ce_records(struct amdgpu_device *adev,
+ struct aca_banks *banks,
+ uint16_t bank_count)
+{
+ struct cper_hdr *corrected = NULL;
+ enum cper_error_severity sev = CPER_SEV_NON_FATAL_CORRECTED;
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ uint32_t reg_data[CPER_ACA_REG_COUNT] = { 0 };
+ struct aca_bank_node *node;
+ struct aca_bank *bank;
+ uint32_t i = 0;
+ int ret;
+
+ corrected = amdgpu_cper_alloc_entry(adev, AMDGPU_CPER_TYPE_RUNTIME, bank_count);
+ if (!corrected) {
+ dev_err(adev->dev, "fail to allocate cper entry for ce records\n");
+ return -ENOMEM;
+ }
+
+ /* Raise severity if any DE is detected in the ACA bank list */
+ list_for_each_entry(node, &banks->list, node) {
+ bank = &node->bank;
+ if (bank->aca_err_type == ACA_ERROR_TYPE_DEFERRED) {
+ sev = CPER_SEV_NON_FATAL_UNCORRECTED;
+ break;
+ }
+ }
+
+ amdgpu_cper_entry_fill_hdr(adev, corrected, AMDGPU_CPER_TYPE_RUNTIME, sev);
+
+ /* Combine CE and DE in cper record */
+ list_for_each_entry(node, &banks->list, node) {
+ bank = &node->bank;
+ reg_data[CPER_ACA_REG_CTL_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_CTL]);
+ reg_data[CPER_ACA_REG_CTL_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_CTL]);
+ reg_data[CPER_ACA_REG_STATUS_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data[CPER_ACA_REG_STATUS_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_STATUS]);
+ reg_data[CPER_ACA_REG_ADDR_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data[CPER_ACA_REG_ADDR_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_ADDR]);
+ reg_data[CPER_ACA_REG_MISC0_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_MISC0]);
+ reg_data[CPER_ACA_REG_MISC0_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_MISC0]);
+ reg_data[CPER_ACA_REG_CONFIG_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_CONFIG]);
+ reg_data[CPER_ACA_REG_CONFIG_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_CONFIG]);
+ reg_data[CPER_ACA_REG_IPID_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data[CPER_ACA_REG_IPID_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_IPID]);
+ reg_data[CPER_ACA_REG_SYND_LO] = lower_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+ reg_data[CPER_ACA_REG_SYND_HI] = upper_32_bits(bank->regs[ACA_REG_IDX_SYND]);
+
+ ret = amdgpu_cper_entry_fill_runtime_section(adev, corrected, i++,
+ amdgpu_aca_err_type_to_cper_sev(adev, bank->aca_err_type),
+ reg_data, CPER_ACA_REG_COUNT);
+ if (ret)
+ return ret;
+ }
+
+ amdgpu_cper_ring_write(ring, corrected, corrected->record_length);
+ kfree(corrected);
+
+ return 0;
+}
+
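+/* Return true if the ring data at 'pos' begins with the "CPER" signature */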
+static bool amdgpu_cper_is_hdr(struct amdgpu_ring *ring, u64 pos)
+{
+ struct cper_hdr *chdr;
+
+ chdr = (struct cper_hdr *)&(ring->ring[pos]);
+ return strcmp(chdr->signature, "CPER") ? false : true;
+}
+
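+/*
+ * Return the byte size of the entry starting at 'pos': the record length if a
+ * CPER header is found there, otherwise the distance to the next header,
+ * capped at the space left before the ring end.
+ */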
+static u32 amdgpu_cper_ring_get_ent_sz(struct amdgpu_ring *ring, u64 pos)
+{
+ struct cper_hdr *chdr;
+ u64 p;
+ u32 chunk, rec_len = 0;
+
+ chdr = (struct cper_hdr *)&(ring->ring[pos]);
+ chunk = ring->ring_size - (pos << 2);
+
+ if (!strcmp(chdr->signature, "CPER")) {
+ rec_len = chdr->record_length;
+ goto calc;
+ }
+
+ /* ring buffer is not full, no cper data after ring->wptr */
+ if (ring->count_dw)
+ goto calc;
+
+ for (p = pos + 1; p <= ring->buf_mask; p++) {
+ chdr = (struct cper_hdr *)&(ring->ring[p]);
+ if (!strcmp(chdr->signature, "CPER")) {
+ rec_len = (p - pos) << 2;
+ goto calc;
+ }
+ }
+
+calc:
+ if (!rec_len)
+ return chunk;
+ else
+ return umin(rec_len, chunk);
+}
+
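+/*
+ * Copy a CPER record into the ring buffer under ring_lock, honoring entry
+ * boundaries and wrapping the write pointer; if the write overruns the read
+ * pointer, advance it to the next complete CPER header.
+ */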
+void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
+{
+ u64 pos, wptr_old, rptr;
+ int rec_cnt_dw = count >> 2;
+ u32 chunk, ent_sz;
+ u8 *s = (u8 *)src;
+
+ if (count >= ring->ring_size - 4) {
+ dev_err(ring->adev->dev,
+ "CPER data size(%d) is larger than ring size(%d)\n",
+ count, ring->ring_size - 4);
+
+ return;
+ }
+
+ mutex_lock(&ring->adev->cper.ring_lock);
+
+ wptr_old = ring->wptr;
+ rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
+
+ while (count) {
+ ent_sz = amdgpu_cper_ring_get_ent_sz(ring, ring->wptr);
+ chunk = umin(ent_sz, count);
+
+ memcpy(&ring->ring[ring->wptr], s, chunk);
+
+ ring->wptr += (chunk >> 2);
+ ring->wptr &= ring->ptr_mask;
+ count -= chunk;
+ s += chunk;
+ }
+
+ if (ring->count_dw < rec_cnt_dw)
+ ring->count_dw = 0;
+
+ /* the buffer is overflow, adjust rptr */
+ if (((wptr_old < rptr) && (rptr <= ring->wptr)) ||
+ ((ring->wptr < wptr_old) && (wptr_old < rptr)) ||
+ ((rptr <= ring->wptr) && (ring->wptr < wptr_old))) {
+ pos = (ring->wptr + 1) & ring->ptr_mask;
+
+ do {
+ ent_sz = amdgpu_cper_ring_get_ent_sz(ring, pos);
+
+ rptr += (ent_sz >> 2);
+ rptr &= ring->ptr_mask;
+ *ring->rptr_cpu_addr = rptr;
+
+ pos = rptr;
+ } while (!amdgpu_cper_is_hdr(ring, rptr));
+ }
+
+ if (ring->count_dw >= rec_cnt_dw)
+ ring->count_dw -= rec_cnt_dw;
+ mutex_unlock(&ring->adev->cper.ring_lock);
+}
+
+static u64 amdgpu_cper_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ return *(ring->rptr_cpu_addr);
+}
+
+static u64 amdgpu_cper_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ return ring->wptr;
+}
+
+static const struct amdgpu_ring_funcs cper_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_CPER,
+ .align_mask = 0xff,
+ .support_64bit_ptrs = false,
+ .get_rptr = amdgpu_cper_ring_get_rptr,
+ .get_wptr = amdgpu_cper_ring_get_wptr,
+};
+
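+/* Set up the software CPER ring buffer: no doorbell and no GPU scheduler */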
+static int amdgpu_cper_ring_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring = &(adev->cper.ring_buf);
+
+ mutex_init(&adev->cper.ring_lock);
+
+ ring->adev = NULL;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = false;
+ ring->no_scheduler = true;
+ ring->funcs = &cper_ring_funcs;
+
+ sprintf(ring->name, "cper");
+ return amdgpu_ring_init(adev, ring, CPER_MAX_RING_SIZE, NULL, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+}
+
+int amdgpu_cper_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
+ return 0;
+
+ r = amdgpu_cper_ring_init(adev);
+ if (r) {
+ dev_err(adev->dev, "failed to initialize cper ring, r = %d\n", r);
+ return r;
+ }
+
+ mutex_init(&adev->cper.cper_lock);
+
+ adev->cper.enabled = true;
+ adev->cper.max_count = CPER_MAX_ALLOWED_COUNT;
+
+ return 0;
+}
+
+int amdgpu_cper_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_aca_is_enabled(adev) && !amdgpu_sriov_ras_cper_en(adev))
+ return 0;
+
+ adev->cper.enabled = false;
+
+ amdgpu_ring_fini(&(adev->cper.ring_buf));
+ adev->cper.count = 0;
+ adev->cper.wptr = 0;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
new file mode 100644
index 000000000000..bcb97d245673
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_CPER_H__
+#define __AMDGPU_CPER_H__
+
+#include "amd_cper.h"
+#include "amdgpu_aca.h"
+
+#define CPER_MAX_ALLOWED_COUNT 0x1000
+#define CPER_MAX_RING_SIZE 0X100000
+#define HDR_LEN (sizeof(struct cper_hdr))
+#define SEC_DESC_LEN (sizeof(struct cper_sec_desc))
+
+#define BOOT_SEC_LEN (sizeof(struct cper_sec_crashdump_boot))
+#define FATAL_SEC_LEN (sizeof(struct cper_sec_crashdump_fatal))
+#define NONSTD_SEC_LEN (sizeof(struct cper_sec_nonstd_err))
+
+#define SEC_DESC_OFFSET(idx) (HDR_LEN + (SEC_DESC_LEN * idx))
+
+#define BOOT_SEC_OFFSET(count, idx) (HDR_LEN + (SEC_DESC_LEN * count) + (BOOT_SEC_LEN * idx))
+#define FATAL_SEC_OFFSET(count, idx) (HDR_LEN + (SEC_DESC_LEN * count) + (FATAL_SEC_LEN * idx))
+#define NONSTD_SEC_OFFSET(count, idx) (HDR_LEN + (SEC_DESC_LEN * count) + (NONSTD_SEC_LEN * idx))
+
+enum amdgpu_cper_type {
+ AMDGPU_CPER_TYPE_RUNTIME,
+ AMDGPU_CPER_TYPE_FATAL,
+ AMDGPU_CPER_TYPE_BOOT,
+ AMDGPU_CPER_TYPE_BP_THRESHOLD,
+};
+
+struct amdgpu_cper {
+ bool enabled;
+
+ atomic_t unique_id;
+ struct mutex cper_lock;
+
+ /* Lifetime CPERs generated */
+ uint32_t count;
+ uint32_t max_count;
+
+ uint32_t wptr;
+
+ void *ring[CPER_MAX_ALLOWED_COUNT];
+ struct amdgpu_ring ring_buf;
+ struct mutex ring_lock;
+};
+
+void amdgpu_cper_entry_fill_hdr(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ enum amdgpu_cper_type type,
+ enum cper_error_severity sev);
+int amdgpu_cper_entry_fill_fatal_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ struct cper_sec_crashdump_reg_data reg_data);
+int amdgpu_cper_entry_fill_runtime_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t idx,
+ enum cper_error_severity sev,
+ uint32_t *reg_dump,
+ uint32_t reg_count);
+int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev,
+ struct cper_hdr *hdr,
+ uint32_t section_idx);
+
+struct cper_hdr *amdgpu_cper_alloc_entry(struct amdgpu_device *adev,
+ enum amdgpu_cper_type type,
+ uint16_t section_count);
+/* UEs must be encoded into separate cper entries, one UE per cper */
+int amdgpu_cper_generate_ue_record(struct amdgpu_device *adev,
+ struct aca_bank *bank);
+/* CEs and DEs are combined into 1 cper entry */
+int amdgpu_cper_generate_ce_records(struct amdgpu_device *adev,
+ struct aca_banks *banks,
+ uint16_t bank_count);
+/* Bad page threshold is encoded into a separate cper entry */
+int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev);
+void amdgpu_cper_ring_write(struct amdgpu_ring *ring,
+ void *src, int count);
+int amdgpu_cper_init(struct amdgpu_device *adev);
+int amdgpu_cper_fini(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4e6b9501ab0a..9cd7741d2254 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -24,87 +24,108 @@
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
+
+#include <linux/file.h>
#include <linux/pagemap.h>
-#include <drm/drmP.h>
+#include <linux/sync_file.h>
+#include <linux/dma-buf.h>
+#include <linux/hmm.h>
+
#include <drm/amdgpu_drm.h>
+#include <drm/drm_syncobj.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
-
-int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
- u32 ip_instance, u32 ring,
- struct amdgpu_ring **out_ring)
+#include "amdgpu_gmc.h"
+#include "amdgpu_gem.h"
+#include "amdgpu_ras.h"
+
+static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
+ struct amdgpu_device *adev,
+ struct drm_file *filp,
+ union drm_amdgpu_cs *cs)
{
- /* Right now all IPs have only one instance - multiple rings. */
- if (ip_instance != 0) {
- DRM_ERROR("invalid ip instance: %d\n", ip_instance);
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+
+ if (cs->in.num_chunks == 0)
return -EINVAL;
- }
- switch (ip_type) {
- default:
- DRM_ERROR("unknown ip type: %d\n", ip_type);
+ memset(p, 0, sizeof(*p));
+ p->adev = adev;
+ p->filp = filp;
+
+ p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
+ if (!p->ctx)
return -EINVAL;
- case AMDGPU_HW_IP_GFX:
- if (ring < adev->gfx.num_gfx_rings) {
- *out_ring = &adev->gfx.gfx_ring[ring];
- } else {
- DRM_ERROR("only %d gfx rings are supported now\n",
- adev->gfx.num_gfx_rings);
- return -EINVAL;
- }
- break;
- case AMDGPU_HW_IP_COMPUTE:
- if (ring < adev->gfx.num_compute_rings) {
- *out_ring = &adev->gfx.compute_ring[ring];
- } else {
- DRM_ERROR("only %d compute rings are supported now\n",
- adev->gfx.num_compute_rings);
- return -EINVAL;
- }
- break;
- case AMDGPU_HW_IP_DMA:
- if (ring < adev->sdma.num_instances) {
- *out_ring = &adev->sdma.instance[ring].ring;
- } else {
- DRM_ERROR("only %d SDMA rings are supported\n",
- adev->sdma.num_instances);
- return -EINVAL;
- }
- break;
- case AMDGPU_HW_IP_UVD:
- *out_ring = &adev->uvd.ring;
- break;
- case AMDGPU_HW_IP_VCE:
- if (ring < adev->vce.num_rings){
- *out_ring = &adev->vce.ring[ring];
- } else {
- DRM_ERROR("only %d VCE rings are supported\n", adev->vce.num_rings);
- return -EINVAL;
- }
- break;
- case AMDGPU_HW_IP_UVD_ENC:
- if (ring < adev->uvd.num_enc_rings){
- *out_ring = &adev->uvd.ring_enc[ring];
- } else {
- DRM_ERROR("only %d UVD ENC rings are supported\n",
- adev->uvd.num_enc_rings);
- return -EINVAL;
- }
- break;
+
+ if (atomic_read(&p->ctx->guilty)) {
+ amdgpu_ctx_put(p->ctx);
+ return -ECANCELED;
}
- if (!(*out_ring && (*out_ring)->adev)) {
- DRM_ERROR("Ring %d is not initialized on IP %d\n",
- ring, ip_type);
+ amdgpu_sync_create(&p->sync);
+ drm_exec_init(&p->exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
+ return 0;
+}
+
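+/*
+ * Map an IB chunk to a gang job index: reuse an existing entity slot or grow
+ * the gang, up to AMDGPU_CS_GANG_SIZE entries.
+ */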
+static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib)
+{
+ struct drm_sched_entity *entity;
+ unsigned int i;
+ int r;
+
+ r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
+ chunk_ib->ip_instance,
+ chunk_ib->ring, &entity);
+ if (r)
+ return r;
+
+ /*
+ * Abort if there is no run queue associated with this entity.
+ * Possibly because of disabled HW IP.
+ */
+ if (entity->rq == NULL)
return -EINVAL;
- }
+ /* Check if we can add this IB to some existing job */
+ for (i = 0; i < p->gang_size; ++i)
+ if (p->entities[i] == entity)
+ return i;
+
+	/* If not, increase the gang size if possible */
+ if (i == AMDGPU_CS_GANG_SIZE)
+ return -EINVAL;
+
+ p->entities[i] = entity;
+ p->gang_size = i + 1;
+ return i;
+}
+
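+/* First pass for an IB chunk: bump the per-job IB count and note the gang leader index */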
+static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib,
+ unsigned int *num_ibs)
+{
+ int r;
+
+ r = amdgpu_cs_job_idx(p, chunk_ib);
+ if (r < 0)
+ return r;
+
+ if (num_ibs[r] >= amdgpu_ring_max_ibs(chunk_ib->ip_type))
+ return -EINVAL;
+
+ ++(num_ibs[r]);
+ p->gang_leader_idx = r;
return 0;
}
-static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
- struct drm_amdgpu_cs_chunk_fence *data,
- uint32_t *offset)
+static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_cs_chunk_fence *data,
+ uint32_t *offset)
{
struct drm_gem_object *gobj;
unsigned long size;
@@ -113,75 +134,76 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
if (gobj == NULL)
return -EINVAL;
- p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
- p->uf_entry.priority = 0;
- p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
- p->uf_entry.tv.shared = true;
- p->uf_entry.user_pages = NULL;
+ p->uf_bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+ drm_gem_object_put(gobj);
- size = amdgpu_bo_size(p->uf_entry.robj);
- if (size != PAGE_SIZE || (data->offset + 8) > size)
+ size = amdgpu_bo_size(p->uf_bo);
+ if (size != PAGE_SIZE || data->offset > (size - 8))
+ return -EINVAL;
+
+ if (amdgpu_ttm_tt_get_usermm(p->uf_bo->tbo.ttm))
return -EINVAL;
*offset = data->offset;
+ return 0;
+}
+
+static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_bo_list_in *data)
+{
+ struct drm_amdgpu_bo_list_entry *info;
+ int r;
- drm_gem_object_unreference_unlocked(gobj);
+ r = amdgpu_bo_create_list_entry_array(data, &info);
+ if (r)
+ return r;
- if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
- amdgpu_bo_unref(&p->uf_entry.robj);
- return -EINVAL;
- }
+ r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
+ &p->bo_list);
+ if (r)
+ goto error_free;
+ kvfree(info);
return 0;
+
+error_free:
+ kvfree(info);
+
+ return r;
}
-int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
+/* Copy the data from userspace and go over it the first time */
+static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
+ union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
struct amdgpu_vm *vm = &fpriv->vm;
- union drm_amdgpu_cs *cs = data;
- uint64_t *chunk_array_user;
uint64_t *chunk_array;
- unsigned size, num_ibs = 0;
uint32_t uf_offset = 0;
- int i;
+ size_t size;
int ret;
+ int i;
- if (cs->in.num_chunks == 0)
- return 0;
-
- chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
- if (!chunk_array)
- return -ENOMEM;
-
- p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
- if (!p->ctx) {
- ret = -EINVAL;
- goto free_chunk;
- }
-
- /* get chunks */
- chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks);
- if (copy_from_user(chunk_array, chunk_array_user,
- sizeof(uint64_t)*cs->in.num_chunks)) {
- ret = -EFAULT;
- goto put_ctx;
- }
+ chunk_array = memdup_array_user(u64_to_user_ptr(cs->in.chunks),
+ cs->in.num_chunks,
+ sizeof(uint64_t));
+ if (IS_ERR(chunk_array))
+ return PTR_ERR(chunk_array);
p->nchunks = cs->in.num_chunks;
- p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
+ p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
GFP_KERNEL);
if (!p->chunks) {
ret = -ENOMEM;
- goto put_ctx;
+ goto free_chunk;
}
for (i = 0; i < p->nchunks; i++) {
- struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
+ struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
struct drm_amdgpu_cs_chunk user_chunk;
- uint32_t __user *cdata;
- chunk_ptr = (void __user *)(uintptr_t)chunk_array[i];
+ chunk_ptr = u64_to_user_ptr(chunk_array[i]);
if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_amdgpu_cs_chunk))) {
ret = -EFAULT;
@@ -192,73 +214,446 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
- cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
- p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
- if (p->chunks[i].kdata == NULL) {
- ret = -ENOMEM;
+ p->chunks[i].kdata = vmemdup_array_user(u64_to_user_ptr(user_chunk.chunk_data),
+ size,
+ sizeof(uint32_t));
+ if (IS_ERR(p->chunks[i].kdata)) {
+ ret = PTR_ERR(p->chunks[i].kdata);
i--;
goto free_partial_kdata;
}
size *= sizeof(uint32_t);
- if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
- ret = -EFAULT;
- goto free_partial_kdata;
- }
+ /* Assume the worst on the following checks */
+ ret = -EINVAL;
switch (p->chunks[i].chunk_id) {
case AMDGPU_CHUNK_ID_IB:
- ++num_ibs;
+ if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
+ goto free_partial_kdata;
+
+ ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
+ if (ret)
+ goto free_partial_kdata;
break;
case AMDGPU_CHUNK_ID_FENCE:
- size = sizeof(struct drm_amdgpu_cs_chunk_fence);
- if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
- ret = -EINVAL;
+ if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
goto free_partial_kdata;
- }
- ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
- &uf_offset);
+ ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
+ &uf_offset);
if (ret)
goto free_partial_kdata;
+ break;
+
+ case AMDGPU_CHUNK_ID_BO_HANDLES:
+ if (size < sizeof(struct drm_amdgpu_bo_list_in))
+ goto free_partial_kdata;
+ /* Only a single BO list is allowed to simplify handling. */
+ if (p->bo_list)
+ goto free_partial_kdata;
+
+ ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
+ if (ret)
+ goto free_partial_kdata;
break;
case AMDGPU_CHUNK_ID_DEPENDENCIES:
+ case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
+ case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
+ case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
+ case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
break;
default:
- ret = -EINVAL;
goto free_partial_kdata;
}
}
- ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
- if (ret)
+ if (!p->gang_size || (amdgpu_sriov_vf(p->adev) && p->gang_size > 1)) {
+ ret = -EINVAL;
+ goto free_all_kdata;
+ }
+
+ for (i = 0; i < p->gang_size; ++i) {
+ ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
+ num_ibs[i], &p->jobs[i],
+ p->filp->client_id);
+ if (ret)
+ goto free_all_kdata;
+ switch (p->adev->enforce_isolation[fpriv->xcp_id]) {
+ case AMDGPU_ENFORCE_ISOLATION_DISABLE:
+ default:
+ p->jobs[i]->enforce_isolation = false;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_ENABLE:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = true;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ case AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER:
+ p->jobs[i]->enforce_isolation = true;
+ p->jobs[i]->run_cleaner_shader = false;
+ break;
+ }
+ }
+ p->gang_leader = p->jobs[p->gang_leader_idx];
+
+ if (p->ctx->generation != p->gang_leader->generation) {
+ ret = -ECANCELED;
goto free_all_kdata;
+ }
+
+ if (p->uf_bo)
+ p->gang_leader->uf_addr = uf_offset;
+ kvfree(chunk_array);
+
+ /* Use this opportunity to fill in task info for the vm */
+ amdgpu_vm_set_task_info(vm);
- if (p->uf_entry.robj)
- p->job->uf_addr = uf_offset;
- kfree(chunk_array);
return 0;
free_all_kdata:
i = p->nchunks - 1;
free_partial_kdata:
for (; i >= 0; i--)
- drm_free_large(p->chunks[i].kdata);
- kfree(p->chunks);
+ kvfree(p->chunks[i].kdata);
+ kvfree(p->chunks);
p->chunks = NULL;
p->nchunks = 0;
-put_ctx:
- amdgpu_ctx_put(p->ctx);
free_chunk:
- kfree(chunk_array);
+ kvfree(chunk_array);
return ret;
}
+static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk,
+ unsigned int *ce_preempt,
+ unsigned int *de_preempt)
+{
+ struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_ring *ring;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ int r;
+
+ r = amdgpu_cs_job_idx(p, chunk_ib);
+ if (r < 0)
+ return r;
+
+ job = p->jobs[r];
+ ring = amdgpu_job_ring(job);
+ ib = &job->ibs[job->num_ibs++];
+
+ /* submissions to kernel queues are disabled */
+ if (ring->no_user_submission)
+ return -EINVAL;
+
+ /* MM engine doesn't support user fences */
+ if (p->uf_bo && ring->funcs->no_user_fence)
+ return -EINVAL;
+
+ if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
+ chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
+ if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
+ (*ce_preempt)++;
+ else
+ (*de_preempt)++;
+
+ /* Each GFX command submit allows only 1 IB max
+ * preemptible for CE & DE */
+ if (*ce_preempt > 1 || *de_preempt > 1)
+ return -EINVAL;
+ }
+
+ if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
+ job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
+
+ r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
+ chunk_ib->ib_bytes : 0,
+ AMDGPU_IB_POOL_DELAYED, ib);
+ if (r) {
+ drm_err(adev_to_drm(p->adev), "Failed to get ib !\n");
+ return r;
+ }
+
+ ib->gpu_addr = chunk_ib->va_start;
+ ib->length_dw = chunk_ib->ib_bytes / 4;
+ ib->flags = chunk_ib->flags;
+ return 0;
+}
+
+static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ unsigned int num_deps;
+ int i, r;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_dep);
+
+ for (i = 0; i < num_deps; ++i) {
+ struct amdgpu_ctx *ctx;
+ struct drm_sched_entity *entity;
+ struct dma_fence *fence;
+
+ ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
+ if (ctx == NULL)
+ return -EINVAL;
+
+ r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
+ deps[i].ip_instance,
+ deps[i].ring, &entity);
+ if (r) {
+ amdgpu_ctx_put(ctx);
+ return r;
+ }
+
+ fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
+ amdgpu_ctx_put(ctx);
+
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ else if (!fence)
+ continue;
+
+ if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
+ struct drm_sched_fence *s_fence;
+ struct dma_fence *old = fence;
+
+ s_fence = to_drm_sched_fence(fence);
+ fence = dma_fence_get(&s_fence->scheduled);
+ dma_fence_put(old);
+ }
+
+ r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
+ dma_fence_put(fence);
+ if (r)
+ return r;
+ }
+ return 0;
+}
+
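+/* Resolve a syncobj (optionally at a timeline point) to a fence and add it to the parser's sync container */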
+static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
+ uint32_t handle, u64 point,
+ u64 flags)
+{
+ struct dma_fence *fence;
+ int r;
+
+ r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
+ if (r) {
+ drm_err(adev_to_drm(p->adev), "syncobj %u failed to find fence @ %llu (%d)!\n",
+ handle, point, r);
+ return r;
+ }
+
+ r = amdgpu_sync_fence(&p->sync, fence, GFP_KERNEL);
+ dma_fence_put(fence);
+ return r;
+}
+
+static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
+ unsigned int num_deps;
+ int i, r;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_sem);
+ for (i = 0; i < num_deps; ++i) {
+ r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
+ unsigned int num_deps;
+ int i, r;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+ for (i = 0; i < num_deps; ++i) {
+ r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
+ syncobj_deps[i].point,
+ syncobj_deps[i].flags);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
+ unsigned int num_deps;
+ int i;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_sem);
+
+ if (p->post_deps)
+ return -EINVAL;
+
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
+
+ if (!p->post_deps)
+ return -ENOMEM;
+
+
+ for (i = 0; i < num_deps; ++i) {
+ p->post_deps[i].syncobj =
+ drm_syncobj_find(p->filp, deps[i].handle);
+ if (!p->post_deps[i].syncobj)
+ return -EINVAL;
+ p->post_deps[i].chain = NULL;
+ p->post_deps[i].point = 0;
+ p->num_post_deps++;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
+ unsigned int num_deps;
+ int i;
+
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+
+ if (p->post_deps)
+ return -EINVAL;
+
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
+
+ if (!p->post_deps)
+ return -ENOMEM;
+
+ for (i = 0; i < num_deps; ++i) {
+ struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
+
+ dep->chain = NULL;
+ if (syncobj_deps[i].point) {
+ dep->chain = dma_fence_chain_alloc();
+ if (!dep->chain)
+ return -ENOMEM;
+ }
+
+ dep->syncobj = drm_syncobj_find(p->filp,
+ syncobj_deps[i].handle);
+ if (!dep->syncobj) {
+ dma_fence_chain_free(dep->chain);
+ return -EINVAL;
+ }
+ dep->point = syncobj_deps[i].point;
+ p->num_post_deps++;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_p2_shadow(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_cp_gfx_shadow *shadow = chunk->kdata;
+ int i;
+
+ if (shadow->flags & ~AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW)
+ return -EINVAL;
+
+ for (i = 0; i < p->gang_size; ++i) {
+ p->jobs[i]->shadow_va = shadow->shadow_va;
+ p->jobs[i]->csa_va = shadow->csa_va;
+ p->jobs[i]->gds_va = shadow->gds_va;
+ p->jobs[i]->init_shadow =
+ shadow->flags & AMDGPU_CS_CHUNK_CP_GFX_SHADOW_FLAGS_INIT_SHADOW;
+ }
+
+ return 0;
+}
+
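+/*
+ * Second pass over the copied chunks: build the IBs, resolve dependencies and
+ * syncobjs, and apply the CP GFX shadow state now that the jobs exist.
+ */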
+static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
+{
+ unsigned int ce_preempt = 0, de_preempt = 0;
+ int i, r;
+
+ for (i = 0; i < p->nchunks; ++i) {
+ struct amdgpu_cs_chunk *chunk;
+
+ chunk = &p->chunks[i];
+
+ switch (chunk->chunk_id) {
+ case AMDGPU_CHUNK_ID_IB:
+ r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_DEPENDENCIES:
+ case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
+ r = amdgpu_cs_p2_dependencies(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
+ r = amdgpu_cs_p2_syncobj_in(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
+ r = amdgpu_cs_p2_syncobj_out(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+ r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
+ r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_CP_GFX_SHADOW:
+ r = amdgpu_cs_p2_shadow(p, chunk);
+ if (r)
+ return r;
+ break;
+ }
+ }
+
+ return 0;
+}
+
/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
@@ -292,12 +687,12 @@ static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
* ticks. The accumulated microseconds (us) are converted to bytes and
* returned.
*/
-static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
+static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
+ u64 *max_bytes,
+ u64 *max_vis_bytes)
{
s64 time_us, increment_us;
- u64 max_bytes;
u64 free_vram, total_vram, used_vram;
-
/* Allow a maximum of 200 accumulated ms. This is basically per-IB
* throttling.
*
@@ -307,11 +702,14 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
*/
const s64 us_upper_bound = 200000;
- if (!adev->mm_stats.log2_max_MBps)
- return 0;
+ if (!adev->mm_stats.log2_max_MBps) {
+ *max_bytes = 0;
+ *max_vis_bytes = 0;
+ return;
+ }
- total_vram = adev->mc.real_vram_size - adev->vram_pin_size;
- used_vram = atomic64_read(&adev->vram_usage);
+ total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
+ used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
spin_lock(&adev->mm_stats.lock);
@@ -321,7 +719,7 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
increment_us = time_us - adev->mm_stats.last_update_us;
adev->mm_stats.last_update_us = time_us;
adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
- us_upper_bound);
+ us_upper_bound);
/* This prevents the short period of low performance when the VRAM
* usage is low and the driver is in debt or doesn't have enough
@@ -338,7 +736,7 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
s64 min_us;
- /* Be more aggresive on dGPUs. Try to fill a portion of free
+ /* Be more aggressive on dGPUs. Try to fill a portion of free
* VRAM now.
*/
if (!(adev->flags & AMD_IS_APU))
@@ -349,51 +747,96 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
}
- /* This returns 0 if the driver is in debt to disallow (optional)
+ /* This is set to 0 if the driver is in debt to disallow (optional)
* buffer moves.
*/
- max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+ *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);
+
+ /* Do the same for visible VRAM if half of it is free */
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
+ u64 total_vis_vram = adev->gmc.visible_vram_size;
+ u64 used_vis_vram =
+ amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
+
+ if (used_vis_vram < total_vis_vram) {
+ u64 free_vis_vram = total_vis_vram - used_vis_vram;
+
+ adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
+ increment_us, us_upper_bound);
+
+ if (free_vis_vram >= total_vis_vram / 2)
+ adev->mm_stats.accum_us_vis =
+ max(bytes_to_us(adev, free_vis_vram / 2),
+ adev->mm_stats.accum_us_vis);
+ }
+
+ *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
+ } else {
+ *max_vis_bytes = 0;
+ }
spin_unlock(&adev->mm_stats.lock);
- return max_bytes;
}
/* Report how many bytes have really been moved for the last command
* submission. This can result in a debt that can stop buffer migrations
* temporarily.
*/
-void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes)
+void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
+ u64 num_vis_bytes)
{
spin_lock(&adev->mm_stats.lock);
adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
+ adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
spin_unlock(&adev->mm_stats.lock);
}
-static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
- struct amdgpu_bo *bo)
+static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- u64 initial_bytes_moved;
+ struct amdgpu_cs_parser *p = param;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = false,
+ .resv = bo->tbo.base.resv
+ };
uint32_t domain;
int r;
- if (bo->pin_count)
+ if (bo->tbo.pin_count)
return 0;
/* Don't move this buffer if we have depleted our allowance
* to move it. Don't move anything if the threshold is zero.
*/
- if (p->bytes_moved < p->bytes_moved_threshold)
- domain = bo->prefered_domains;
- else
+ if (p->bytes_moved < p->bytes_moved_threshold &&
+ (!bo->tbo.base.dma_buf ||
+ list_empty(&bo->tbo.base.dma_buf->attachments))) {
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
+ /* And don't move a CPU_ACCESS_REQUIRED BO to limited
+ * visible VRAM if we've depleted our allowance to do
+ * that.
+ */
+ if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
+ domain = bo->preferred_domains;
+ else
+ domain = bo->allowed_domains;
+ } else {
+ domain = bo->preferred_domains;
+ }
+ } else {
domain = bo->allowed_domains;
+ }
retry:
- amdgpu_ttm_placement_from_domain(bo, domain);
- initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
- initial_bytes_moved;
+ amdgpu_bo_placement_from_domain(bo, domain);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+
+ p->bytes_moved += ctx.bytes_moved;
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
+ p->bytes_moved_vis += ctx.bytes_moved;
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
domain = bo->allowed_domains;
@@ -403,387 +846,269 @@ retry:
return r;
}
-/* Last resort, try to evict something from the current working set */
-static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
- struct amdgpu_bo *validated)
-{
- uint32_t domain = validated->allowed_domains;
- int r;
-
- if (!p->evictable)
- return false;
-
- for (;&p->evictable->tv.head != &p->validated;
- p->evictable = list_prev_entry(p->evictable, tv.head)) {
-
- struct amdgpu_bo_list_entry *candidate = p->evictable;
- struct amdgpu_bo *bo = candidate->robj;
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- u64 initial_bytes_moved;
- uint32_t other;
-
- /* If we reached our current BO we can forget it */
- if (candidate->robj == validated)
- break;
-
- other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
-
- /* Check if this BO is in one of the domains we need space for */
- if (!(other & domain))
- continue;
-
- /* Check if we can move this BO somewhere else */
- other = bo->allowed_domains & ~domain;
- if (!other)
- continue;
-
- /* Good we can try to move this BO somewhere else */
- amdgpu_ttm_placement_from_domain(bo, other);
- initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- p->bytes_moved += atomic64_read(&adev->num_bytes_moved) -
- initial_bytes_moved;
-
- if (unlikely(r))
- break;
-
- p->evictable = list_prev_entry(p->evictable, tv.head);
- list_move(&candidate->tv.head, &p->validated);
-
- return true;
- }
-
- return false;
-}
-
-static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
-{
- struct amdgpu_cs_parser *p = param;
- int r;
-
- do {
- r = amdgpu_cs_bo_validate(p, bo);
- } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo));
- if (r)
- return r;
-
- if (bo->shadow)
- r = amdgpu_cs_bo_validate(p, bo->shadow);
-
- return r;
-}
-
-static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
- struct list_head *validated)
-{
- struct amdgpu_bo_list_entry *lobj;
- int r;
-
- list_for_each_entry(lobj, validated, tv.head) {
- struct amdgpu_bo *bo = lobj->robj;
- bool binding_userptr = false;
- struct mm_struct *usermm;
-
- usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
- if (usermm && usermm != current->mm)
- return -EPERM;
-
- /* Check if we have user pages and nobody bound the BO already */
- if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
- size_t size = sizeof(struct page *);
-
- size *= bo->tbo.ttm->num_pages;
- memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
- binding_userptr = true;
- }
-
- if (p->evictable == lobj)
- p->evictable = NULL;
-
- r = amdgpu_cs_validate(p, bo);
- if (r)
- return r;
-
- if (binding_userptr) {
- drm_free_large(lobj->user_pages);
- lobj->user_pages = NULL;
- }
- }
- return 0;
-}
-
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct ttm_operation_ctx ctx = { true, false };
+ struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry *e;
- struct list_head duplicates;
- bool need_mmap_lock = false;
- unsigned i, tries = 10;
+ struct drm_gem_object *obj;
+ unsigned long index;
+ unsigned int i;
int r;
- INIT_LIST_HEAD(&p->validated);
+ /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
+ if (cs->in.bo_list_handle) {
+ if (p->bo_list)
+ return -EINVAL;
- p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
- if (p->bo_list) {
- need_mmap_lock = p->bo_list->first_userptr !=
- p->bo_list->num_entries;
- amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+ r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
+ &p->bo_list);
+ if (r)
+ return r;
+ } else if (!p->bo_list) {
+ /* Create an empty bo_list when no handle is provided */
+ r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
+ &p->bo_list);
+ if (r)
+ return r;
}
- INIT_LIST_HEAD(&duplicates);
- amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
-
- if (p->uf_entry.robj)
- list_add(&p->uf_entry.tv.head, &p->validated);
+ mutex_lock(&p->bo_list->bo_list_mutex);
- if (need_mmap_lock)
- down_read(&current->mm->mmap_sem);
+ /* Get the userptr backing pages. If the pages were updated after being
+ * registered in amdgpu_gem_userptr_ioctl(), the validation below will do
+ * an amdgpu_ttm_backend_bind() to flush and invalidate the new pages.
+ */
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ bool userpage_invalidated = false;
+ struct amdgpu_bo *bo = e->bo;
- while (1) {
- struct list_head need_pages;
- unsigned i;
+ r = amdgpu_ttm_tt_get_user_pages(bo, &e->range);
+ if (r)
+ goto out_free_user_pages;
- r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
- &duplicates);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
- DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
- goto error_free_pages;
+ for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
+ if (bo->tbo.ttm->pages[i] != hmm_pfn_to_page(e->range->hmm_pfns[i])) {
+ userpage_invalidated = true;
+ break;
+ }
}
+ e->user_invalidated = userpage_invalidated;
+ }
- /* Without a BO list we don't have userptr BOs */
- if (!p->bo_list)
- break;
-
- INIT_LIST_HEAD(&need_pages);
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
-
- e = &p->bo_list->array[i];
-
- if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
- &e->user_invalidated) && e->user_pages) {
-
- /* We acquired a page array, but somebody
- * invalidated it. Free it an try again
- */
- release_pages(e->user_pages,
- e->robj->tbo.ttm->num_pages,
- false);
- drm_free_large(e->user_pages);
- e->user_pages = NULL;
- }
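+ /* Lock the VM page directory and all BOs of the submission with drm_exec,
+ * restarting the whole block whenever a reservation is contended.
+ */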
+ drm_exec_until_all_locked(&p->exec) {
+ r = amdgpu_vm_lock_pd(&fpriv->vm, &p->exec, 1 + p->gang_size);
+ drm_exec_retry_on_contention(&p->exec);
+ if (unlikely(r))
+ goto out_free_user_pages;
- if (e->robj->tbo.ttm->state != tt_bound &&
- !e->user_pages) {
- list_del(&e->tv.head);
- list_add(&e->tv.head, &need_pages);
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ /* One fence for TTM and one for each CS job */
+ r = drm_exec_prepare_obj(&p->exec, &e->bo->tbo.base,
+ 1 + p->gang_size);
+ drm_exec_retry_on_contention(&p->exec);
+ if (unlikely(r))
+ goto out_free_user_pages;
- amdgpu_bo_unreserve(e->robj);
- }
+ e->bo_va = amdgpu_vm_bo_find(vm, e->bo);
}
- if (list_empty(&need_pages))
- break;
+ if (p->uf_bo) {
+ r = drm_exec_prepare_obj(&p->exec, &p->uf_bo->tbo.base,
+ 1 + p->gang_size);
+ drm_exec_retry_on_contention(&p->exec);
+ if (unlikely(r))
+ goto out_free_user_pages;
+ }
+ }
- /* Unreserve everything again. */
- ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct mm_struct *usermm;
- /* We tried too many times, just abort */
- if (!--tries) {
- r = -EDEADLK;
- DRM_ERROR("deadlock in %s\n", __func__);
- goto error_free_pages;
+ usermm = amdgpu_ttm_tt_get_usermm(e->bo->tbo.ttm);
+ if (usermm && usermm != current->mm) {
+ r = -EPERM;
+ goto out_free_user_pages;
}
- /* Fill the page arrays for all useptrs. */
- list_for_each_entry(e, &need_pages, tv.head) {
- struct ttm_tt *ttm = e->robj->tbo.ttm;
-
- e->user_pages = drm_calloc_large(ttm->num_pages,
- sizeof(struct page*));
- if (!e->user_pages) {
- r = -ENOMEM;
- DRM_ERROR("calloc failure in %s\n", __func__);
- goto error_free_pages;
- }
+ if (amdgpu_ttm_tt_is_userptr(e->bo->tbo.ttm) &&
+ e->user_invalidated) {
+ amdgpu_bo_placement_from_domain(e->bo,
+ AMDGPU_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&e->bo->tbo, &e->bo->placement,
+ &ctx);
+ if (r)
+ goto out_free_user_pages;
- r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
- if (r) {
- DRM_ERROR("amdgpu_ttm_tt_get_user_pages failed.\n");
- drm_free_large(e->user_pages);
- e->user_pages = NULL;
- goto error_free_pages;
- }
+ amdgpu_ttm_tt_set_user_pages(e->bo->tbo.ttm,
+ e->range);
}
-
- /* And try again. */
- list_splice(&need_pages, &p->validated);
}
- p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
+ amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
+ &p->bytes_moved_vis_threshold);
p->bytes_moved = 0;
- p->evictable = list_last_entry(&p->validated,
- struct amdgpu_bo_list_entry,
- tv.head);
+ p->bytes_moved_vis = 0;
- r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
- amdgpu_cs_validate, p);
+ r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
+ amdgpu_cs_bo_validate, p);
if (r) {
- DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
- goto error_validate;
+ drm_err(adev_to_drm(p->adev), "amdgpu_vm_validate() failed.\n");
+ goto out_free_user_pages;
}
- r = amdgpu_cs_list_validate(p, &duplicates);
- if (r) {
- DRM_ERROR("amdgpu_cs_list_validate(duplicates) failed.\n");
- goto error_validate;
+ drm_exec_for_each_locked_object(&p->exec, index, obj) {
+ r = amdgpu_cs_bo_validate(p, gem_to_amdgpu_bo(obj));
+ if (unlikely(r))
+ goto out_free_user_pages;
}
- r = amdgpu_cs_list_validate(p, &p->validated);
- if (r) {
- DRM_ERROR("amdgpu_cs_list_validate(validated) failed.\n");
- goto error_validate;
+ if (p->uf_bo) {
+ r = amdgpu_ttm_alloc_gart(&p->uf_bo->tbo);
+ if (unlikely(r))
+ goto out_free_user_pages;
+
+ p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(p->uf_bo);
}
- amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved);
+ amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
+ p->bytes_moved_vis);
- fpriv->vm.last_eviction_counter =
- atomic64_read(&p->adev->num_evictions);
+ for (i = 0; i < p->gang_size; ++i)
+ amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
+ p->bo_list->gws_obj,
+ p->bo_list->oa_obj);
+ return 0;
- if (p->bo_list) {
- struct amdgpu_bo *gds = p->bo_list->gds_obj;
- struct amdgpu_bo *gws = p->bo_list->gws_obj;
- struct amdgpu_bo *oa = p->bo_list->oa_obj;
- struct amdgpu_vm *vm = &fpriv->vm;
- unsigned i;
+out_free_user_pages:
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = e->bo;
- for (i = 0; i < p->bo_list->num_entries; i++) {
- struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
+ e->range = NULL;
+ }
+ mutex_unlock(&p->bo_list->bo_list_mutex);
+ return r;
+}
- p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
- }
+static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
+{
+ int i, j;
- if (gds) {
- p->job->gds_base = amdgpu_bo_gpu_offset(gds);
- p->job->gds_size = amdgpu_bo_size(gds);
- }
- if (gws) {
- p->job->gws_base = amdgpu_bo_gpu_offset(gws);
- p->job->gws_size = amdgpu_bo_size(gws);
- }
- if (oa) {
- p->job->oa_base = amdgpu_bo_gpu_offset(oa);
- p->job->oa_size = amdgpu_bo_size(oa);
- }
- }
+ if (!trace_amdgpu_cs_enabled())
+ return;
- if (!r && p->uf_entry.robj) {
- struct amdgpu_bo *uf = p->uf_entry.robj;
+ for (i = 0; i < p->gang_size; ++i) {
+ struct amdgpu_job *job = p->jobs[i];
- r = amdgpu_ttm_bind(&uf->tbo, &uf->tbo.mem);
- p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
+ for (j = 0; j < job->num_ibs; ++j)
+ trace_amdgpu_cs(p, job, &job->ibs[j]);
}
+}
-error_validate:
- if (r) {
- amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
- ttm_eu_backoff_reservation(&p->ticket, &p->validated);
- }
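+/* Copy or patch the IBs of rings that use kernel-side command parsing
+ * (UVD/VCE VM emulation) so the parser sees the real buffer contents.
+ */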
+static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job)
+{
+ struct amdgpu_ring *ring = amdgpu_job_ring(job);
+ unsigned int i;
+ int r;
-error_free_pages:
+ /* Only for UVD/VCE VM emulation */
+ if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
+ return 0;
- if (need_mmap_lock)
- up_read(&current->mm->mmap_sem);
+ for (i = 0; i < job->num_ibs; ++i) {
+ struct amdgpu_ib *ib = &job->ibs[i];
+ struct amdgpu_bo_va_mapping *m;
+ struct amdgpu_bo *aobj;
+ uint64_t va_start;
+ uint8_t *kptr;
+
+ va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
+ r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
+ if (r) {
+ drm_err(adev_to_drm(p->adev), "IB va_start is invalid\n");
+ return r;
+ }
- if (p->bo_list) {
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- e = &p->bo_list->array[i];
+ if ((va_start + ib->length_dw * 4) >
+ (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+ drm_err(adev_to_drm(p->adev), "IB va_start+ib_bytes is invalid\n");
+ return -EINVAL;
+ }
- if (!e->user_pages)
- continue;
+ /* the IB should be reserved at this point */
+ r = amdgpu_bo_kmap(aobj, (void **)&kptr);
+ if (r)
+ return r;
+
+ kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);
- release_pages(e->user_pages,
- e->robj->tbo.ttm->num_pages,
- false);
- drm_free_large(e->user_pages);
+ if (ring->funcs->parse_cs) {
+ memcpy(ib->ptr, kptr, ib->length_dw * 4);
+ amdgpu_bo_kunmap(aobj);
+
+ r = amdgpu_ring_parse_cs(ring, p, job, ib);
+ if (r)
+ return r;
+
+ if (ib->sa_bo)
+ ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+ } else {
+ ib->ptr = (uint32_t *)kptr;
+ r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
+ amdgpu_bo_kunmap(aobj);
+ if (r)
+ return r;
}
}
- return r;
+ return 0;
}
-static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
+static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
{
- struct amdgpu_bo_list_entry *e;
+ unsigned int i;
int r;
- list_for_each_entry(e, &p->validated, tv.head) {
- struct reservation_object *resv = e->robj->tbo.resv;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);
-
+ for (i = 0; i < p->gang_size; ++i) {
+ r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
if (r)
return r;
}
return 0;
}
-/**
- * cs_parser_fini() - clean parser states
- * @parser: parser structure holding parsing context.
- * @error: error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
-{
- struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
- unsigned i;
-
- if (!error) {
- amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
-
- ttm_eu_fence_buffer_objects(&parser->ticket,
- &parser->validated,
- parser->fence);
- } else if (backoff) {
- ttm_eu_backoff_reservation(&parser->ticket,
- &parser->validated);
- }
- dma_fence_put(parser->fence);
-
- if (parser->ctx)
- amdgpu_ctx_put(parser->ctx);
- if (parser->bo_list)
- amdgpu_bo_list_put(parser->bo_list);
-
- for (i = 0; i < parser->nchunks; i++)
- drm_free_large(parser->chunks[i].kdata);
- kfree(parser->chunks);
- if (parser->job)
- amdgpu_job_free(parser->job);
- amdgpu_bo_unref(&parser->uf_entry.robj);
-}
-
-static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
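+/* Update the page tables of all VM mappings touched by this submission and
+ * make the jobs wait for the resulting page table updates.
+ */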
+static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
- struct amdgpu_device *adev = p->adev;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_job *job = p->gang_leader;
+ struct amdgpu_device *adev = p->adev;
struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_bo_list_entry *e;
struct amdgpu_bo_va *bo_va;
- struct amdgpu_bo *bo;
- int i, r;
+ unsigned int i;
+ int r;
- r = amdgpu_vm_update_directories(adev, vm);
- if (r)
- return r;
+ /*
+ * We can't use gang submit with reserved VMIDs when the VM changes
+ * can't be invalidated by more than one engine at the same time.
+ */
+ if (p->gang_size > 1 && !adev->vm_manager.concurrent_flush) {
+ for (i = 0; i < p->gang_size; ++i) {
+ struct drm_sched_entity *entity = p->entities[i];
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
+ struct amdgpu_ring *ring = to_amdgpu_ring(sched);
- r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_dir_update);
- if (r)
- return r;
+ if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
+ return -EINVAL;
+ }
+ }
+
+ if (!amdgpu_vm_ready(vm))
+ return -EINVAL;
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
@@ -793,349 +1118,354 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(adev, &p->job->sync,
- fpriv->prt_va->last_pt_update);
+ r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update,
+ GFP_KERNEL);
if (r)
return r;
- if (amdgpu_sriov_vf(adev)) {
- struct dma_fence *f;
- bo_va = vm->csa_bo_va;
+ if (fpriv->csa_va) {
+ bo_va = fpriv->csa_va;
BUG_ON(!bo_va);
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
- f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->job->sync, f);
+ r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
+ GFP_KERNEL);
if (r)
return r;
}
- if (p->bo_list) {
- for (i = 0; i < p->bo_list->num_entries; i++) {
- struct dma_fence *f;
+ /* FIXME: In theory this loop shouldn't be needed any more when
+ * amdgpu_vm_handle_moved handles all moved BOs that are reserved
+ * with p->ticket. But removing it caused test regressions, so I'm
+ * leaving it here for now.
+ */
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ bo_va = e->bo_va;
+ if (bo_va == NULL)
+ continue;
- /* ignore duplicates */
- bo = p->bo_list->array[i].robj;
- if (!bo)
- continue;
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (r)
+ return r;
- bo_va = p->bo_list->array[i].bo_va;
- if (bo_va == NULL)
- continue;
+ r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update,
+ GFP_KERNEL);
+ if (r)
+ return r;
+ }
- r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r)
- return r;
+ r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket);
+ if (r)
+ return r;
- f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->job->sync, f);
- if (r)
- return r;
- }
+ r = amdgpu_vm_update_pdes(adev, vm, false);
+ if (r)
+ return r;
- }
+ r = amdgpu_sync_fence(&p->sync, vm->last_update, GFP_KERNEL);
+ if (r)
+ return r;
+
+ for (i = 0; i < p->gang_size; ++i) {
+ job = p->jobs[i];
- r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);
+ if (!job->vm)
+ continue;
+
+ job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
+ }
- if (amdgpu_vm_debug && p->bo_list) {
+ if (adev->debug_vm) {
/* Invalidate all BOs to test for userspace bugs */
- for (i = 0; i < p->bo_list->num_entries; i++) {
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = e->bo;
+
/* ignore duplicates */
- bo = p->bo_list->array[i].robj;
if (!bo)
continue;
- amdgpu_vm_bo_invalidate(adev, bo);
+ amdgpu_vm_bo_invalidate(bo, false);
}
}
- return r;
+ return 0;
}
-static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
- struct amdgpu_cs_parser *p)
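+/* Wait for the previous submission on this context and add the reservation
+ * object fences of all locked BOs to the sync objects of the jobs.
+ */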
+static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_ring *ring = p->job->ring;
- int i, r;
+ struct drm_gpu_scheduler *sched;
+ struct drm_gem_object *obj;
+ struct dma_fence *fence;
+ unsigned long index;
+ unsigned int i;
+ int r;
- /* Only for UVD/VCE VM emulation */
- if (ring->funcs->parse_cs) {
- for (i = 0; i < p->job->num_ibs; i++) {
- r = amdgpu_ring_parse_cs(ring, p, i);
- if (r)
- return r;
- }
+ r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
+ if (r) {
+ if (r != -ERESTARTSYS)
+ drm_err(adev_to_drm(p->adev), "amdgpu_ctx_wait_prev_fence failed.\n");
+ return r;
}
- if (p->job->vm) {
- p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.bo);
+ drm_exec_for_each_locked_object(&p->exec, index, obj) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ struct dma_resv *resv = bo->tbo.base.resv;
+ enum amdgpu_sync_mode sync_mode;
- r = amdgpu_bo_vm_update_pte(p);
+ sync_mode = amdgpu_bo_explicit_sync(bo) ?
+ AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
+ r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
+ &fpriv->vm);
if (r)
return r;
}
- return amdgpu_cs_sync_rings(p);
-}
-
-static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
- struct amdgpu_cs_parser *parser)
-{
- struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
- struct amdgpu_vm *vm = &fpriv->vm;
- int i, j;
- int r, ce_preempt = 0, de_preempt = 0;
-
- for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
- struct amdgpu_cs_chunk *chunk;
- struct amdgpu_ib *ib;
- struct drm_amdgpu_cs_chunk_ib *chunk_ib;
- struct amdgpu_ring *ring;
+ for (i = 0; i < p->gang_size; ++i) {
+ r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
+ if (r)
+ return r;
+ }
- chunk = &parser->chunks[i];
- ib = &parser->job->ibs[j];
- chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
+ sched = p->gang_leader->base.entity->rq->sched;
+ while ((fence = amdgpu_sync_get_fence(&p->sync))) {
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
- if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
+ /*
+ * When we have a dependency it might be necessary to insert a
+ * pipeline sync to make sure that all caches etc. are flushed and the
+ * next job actually sees the results from the previous one
+ * before we start executing on the same scheduler ring.
+ */
+ if (!s_fence || s_fence->sched != sched) {
+ dma_fence_put(fence);
continue;
-
- if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX && amdgpu_sriov_vf(adev)) {
- if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
- if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
- ce_preempt++;
- else
- de_preempt++;
- }
-
- /* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
- if (ce_preempt > 1 || de_preempt > 1)
- return -EINVAL;
}
- r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
- chunk_ib->ip_instance, chunk_ib->ring,
- &ring);
+ r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence,
+ GFP_KERNEL);
+ dma_fence_put(fence);
if (r)
return r;
+ }
+ return 0;
+}
- if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
- parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
- if (!parser->ctx->preamble_presented) {
- parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
- parser->ctx->preamble_presented = true;
- }
- }
-
- if (parser->job->ring && parser->job->ring != ring)
- return -EINVAL;
-
- parser->job->ring = ring;
-
- if (ring->funcs->parse_cs) {
- struct amdgpu_bo_va_mapping *m;
- struct amdgpu_bo *aobj = NULL;
- uint64_t offset;
- uint8_t *kptr;
-
- m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
- &aobj);
- if (!aobj) {
- DRM_ERROR("IB va_start is invalid\n");
- return -EINVAL;
- }
+static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
+{
+ int i;
- if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
- (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("IB va_start+ib_bytes is invalid\n");
- return -EINVAL;
- }
+ for (i = 0; i < p->num_post_deps; ++i) {
+ if (p->post_deps[i].chain && p->post_deps[i].point) {
+ drm_syncobj_add_point(p->post_deps[i].syncobj,
+ p->post_deps[i].chain,
+ p->fence, p->post_deps[i].point);
+ p->post_deps[i].chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(p->post_deps[i].syncobj,
+ p->fence);
+ }
+ }
+}
- /* the IB should be reserved at this point */
- r = amdgpu_bo_kmap(aobj, (void **)&kptr);
- if (r) {
- return r;
- }
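+/* Arm and push all jobs of the gang to their schedulers, attach the finished
+ * fence of the gang leader to the locked BOs and hand the sequence number
+ * back to userspace.
+ */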
+static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
+ union drm_amdgpu_cs *cs)
+{
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_job *leader = p->gang_leader;
+ struct amdgpu_bo_list_entry *e;
+ struct drm_gem_object *gobj;
+ unsigned long index;
+ unsigned int i;
+ uint64_t seq;
+ int r;
- offset = m->start * AMDGPU_GPU_PAGE_SIZE;
- kptr += chunk_ib->va_start - offset;
+ for (i = 0; i < p->gang_size; ++i)
+ drm_sched_job_arm(&p->jobs[i]->base);
- r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
+ for (i = 0; i < p->gang_size; ++i) {
+ struct dma_fence *fence;
- memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
- amdgpu_bo_kunmap(aobj);
- } else {
- r = amdgpu_ib_get(adev, vm, 0, ib);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- return r;
- }
+ if (p->jobs[i] == leader)
+ continue;
+ fence = &p->jobs[i]->base.s_fence->scheduled;
+ dma_fence_get(fence);
+ r = drm_sched_job_add_dependency(&leader->base, fence);
+ if (r) {
+ dma_fence_put(fence);
+ return r;
}
-
- ib->gpu_addr = chunk_ib->va_start;
- ib->length_dw = chunk_ib->ib_bytes / 4;
- ib->flags = chunk_ib->flags;
- j++;
}
- /* UVD & VCE fw doesn't support user fences */
- if (parser->job->uf_addr && (
- parser->job->ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
- parser->job->ring->funcs->type == AMDGPU_RING_TYPE_VCE))
- return -EINVAL;
+ if (p->gang_size > 1) {
+ for (i = 0; i < p->gang_size; ++i)
+ amdgpu_job_set_gang_leader(p->jobs[i], leader);
+ }
- return 0;
-}
+ /* No memory allocation is allowed while holding the notifier lock.
+ * The lock is held until amdgpu_cs_submit() is finished and the fence is
+ * added to the BOs.
+ */
+ mutex_lock(&p->adev->notifier_lock);
-static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
- struct amdgpu_cs_parser *p)
-{
- struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- int i, j, r;
+ /* If userptrs were invalidated after amdgpu_cs_parser_bos(), return
+ * -EAGAIN; drmIoctl() in libdrm will then restart the amdgpu_cs_ioctl.
+ */
+ r = 0;
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ r |= !amdgpu_ttm_tt_get_user_pages_done(e->bo->tbo.ttm,
+ e->range);
+ e->range = NULL;
+ }
+ if (r) {
+ r = -EAGAIN;
+ mutex_unlock(&p->adev->notifier_lock);
+ return r;
+ }
- for (i = 0; i < p->nchunks; ++i) {
- struct drm_amdgpu_cs_chunk_dep *deps;
- struct amdgpu_cs_chunk *chunk;
- unsigned num_deps;
+ p->fence = dma_fence_get(&leader->base.s_fence->finished);
+ drm_exec_for_each_locked_object(&p->exec, index, gobj) {
- chunk = &p->chunks[i];
+ ttm_bo_move_to_lru_tail_unlocked(&gem_to_amdgpu_bo(gobj)->tbo);
- if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
- continue;
+ /* Everybody except for the gang leader uses READ */
+ for (i = 0; i < p->gang_size; ++i) {
+ if (p->jobs[i] == leader)
+ continue;
- deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
- num_deps = chunk->length_dw * 4 /
- sizeof(struct drm_amdgpu_cs_chunk_dep);
+ dma_resv_add_fence(gobj->resv,
+ &p->jobs[i]->base.s_fence->finished,
+ DMA_RESV_USAGE_READ);
+ }
- for (j = 0; j < num_deps; ++j) {
- struct amdgpu_ring *ring;
- struct amdgpu_ctx *ctx;
- struct dma_fence *fence;
+ /* The gang leader is remembered as the writer */
+ dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
+ }
- r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
- deps[j].ip_instance,
- deps[j].ring, &ring);
- if (r)
- return r;
+ seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
+ p->fence);
+ amdgpu_cs_post_dependencies(p);
- ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
- if (ctx == NULL)
- return -EINVAL;
+ if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
+ !p->ctx->preamble_presented) {
+ leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+ p->ctx->preamble_presented = true;
+ }
- fence = amdgpu_ctx_get_fence(ctx, ring,
- deps[j].handle);
- if (IS_ERR(fence)) {
- r = PTR_ERR(fence);
- amdgpu_ctx_put(ctx);
- return r;
+ cs->out.handle = seq;
+ leader->uf_sequence = seq;
- } else if (fence) {
- r = amdgpu_sync_fence(adev, &p->job->sync,
- fence);
- dma_fence_put(fence);
- amdgpu_ctx_put(ctx);
- if (r)
- return r;
- }
- }
+ amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket);
+ for (i = 0; i < p->gang_size; ++i) {
+ amdgpu_job_free_resources(p->jobs[i]);
+ trace_amdgpu_cs_ioctl(p->jobs[i]);
+ drm_sched_entity_push_job(&p->jobs[i]->base);
+ p->jobs[i] = NULL;
}
+ amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
+
+ mutex_unlock(&p->adev->notifier_lock);
+ mutex_unlock(&p->bo_list->bo_list_mutex);
return 0;
}
-static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
- union drm_amdgpu_cs *cs)
+/* Cleanup the parser structure */
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
- struct amdgpu_ring *ring = p->job->ring;
- struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
- struct amdgpu_job *job;
- int r;
+ unsigned int i;
- job = p->job;
- p->job = NULL;
+ amdgpu_sync_free(&parser->sync);
+ drm_exec_fini(&parser->exec);
- r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
- if (r) {
- amdgpu_job_free(job);
- return r;
+ for (i = 0; i < parser->num_post_deps; i++) {
+ drm_syncobj_put(parser->post_deps[i].syncobj);
+ kfree(parser->post_deps[i].chain);
}
+ kfree(parser->post_deps);
- job->owner = p->filp;
- job->fence_ctx = entity->fence_context;
- p->fence = dma_fence_get(&job->base.s_fence->finished);
- cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
- job->uf_sequence = cs->out.handle;
- amdgpu_job_free_resources(job);
- amdgpu_cs_parser_fini(p, 0, true);
+ dma_fence_put(parser->fence);
- trace_amdgpu_cs_ioctl(job);
- amd_sched_entity_push_job(&job->base);
+ if (parser->ctx)
+ amdgpu_ctx_put(parser->ctx);
+ if (parser->bo_list)
+ amdgpu_bo_list_put(parser->bo_list);
- return 0;
+ for (i = 0; i < parser->nchunks; i++)
+ kvfree(parser->chunks[i].kdata);
+ kvfree(parser->chunks);
+ for (i = 0; i < parser->gang_size; ++i) {
+ if (parser->jobs[i])
+ amdgpu_job_free(parser->jobs[i]);
+ }
+ amdgpu_bo_unref(&parser->uf_bo);
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
- union drm_amdgpu_cs *cs = data;
- struct amdgpu_cs_parser parser = {};
- bool reserved_buffers = false;
- int i, r;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_cs_parser parser;
+ int r;
+
+ if (amdgpu_ras_intr_triggered())
+ return -EHWPOISON;
if (!adev->accel_working)
return -EBUSY;
- parser.adev = adev;
- parser.filp = filp;
-
- r = amdgpu_cs_parser_init(&parser, data);
+ r = amdgpu_cs_parser_init(&parser, adev, filp, data);
if (r) {
- DRM_ERROR("Failed to initialize parser !\n");
- goto out;
+ drm_err_ratelimited(dev, "Failed to initialize parser %d!\n", r);
+ return r;
}
+ r = amdgpu_cs_pass1(&parser, data);
+ if (r)
+ goto error_fini;
+
+ r = amdgpu_cs_pass2(&parser);
+ if (r)
+ goto error_fini;
+
r = amdgpu_cs_parser_bos(&parser, data);
if (r) {
if (r == -ENOMEM)
- DRM_ERROR("Not enough memory for command submission!\n");
- else if (r != -ERESTARTSYS)
- DRM_ERROR("Failed to process the buffer list %d!\n", r);
- goto out;
+ drm_err(dev, "Not enough memory for command submission!\n");
+ else if (r != -ERESTARTSYS && r != -EAGAIN)
+ drm_dbg(dev, "Failed to process the buffer list %d!\n", r);
+ goto error_fini;
}
- reserved_buffers = true;
- r = amdgpu_cs_ib_fill(adev, &parser);
+ r = amdgpu_cs_patch_jobs(&parser);
if (r)
- goto out;
+ goto error_backoff;
- r = amdgpu_cs_dependencies(adev, &parser);
- if (r) {
- DRM_ERROR("Failed in the dependencies handling %d!\n", r);
- goto out;
- }
-
- for (i = 0; i < parser.job->num_ibs; i++)
- trace_amdgpu_cs(&parser, i);
+ r = amdgpu_cs_vm_handling(&parser);
+ if (r)
+ goto error_backoff;
- r = amdgpu_cs_ib_vm_chunk(adev, &parser);
+ r = amdgpu_cs_sync_rings(&parser);
if (r)
- goto out;
+ goto error_backoff;
+
+ trace_amdgpu_cs_ibs(&parser);
- r = amdgpu_cs_submit(&parser, cs);
+ r = amdgpu_cs_submit(&parser, data);
if (r)
- goto out;
+ goto error_backoff;
+ amdgpu_cs_parser_fini(&parser);
return 0;
-out:
- amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
+
+error_backoff:
+ mutex_unlock(&parser.bo_list->bo_list_mutex);
+
+error_fini:
+ amdgpu_cs_parser_fini(&parser);
return r;
}
@@ -1152,27 +1482,30 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
union drm_amdgpu_wait_cs *wait = data;
- struct amdgpu_device *adev = dev->dev_private;
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
- struct amdgpu_ring *ring = NULL;
+ struct drm_sched_entity *entity;
struct amdgpu_ctx *ctx;
struct dma_fence *fence;
long r;
- r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
- wait->in.ring, &ring);
- if (r)
- return r;
-
ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
if (ctx == NULL)
return -EINVAL;
- fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
+ r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
+ wait->in.ring, &entity);
+ if (r) {
+ amdgpu_ctx_put(ctx);
+ return r;
+ }
+
+ fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
if (IS_ERR(fence))
r = PTR_ERR(fence);
else if (fence) {
r = dma_fence_wait_timeout(fence, true, timeout);
+ if (r > 0 && fence->error)
+ r = fence->error;
dma_fence_put(fence);
} else
r = 1;
@@ -1198,28 +1531,90 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
struct drm_file *filp,
struct drm_amdgpu_fence *user)
{
- struct amdgpu_ring *ring;
+ struct drm_sched_entity *entity;
struct amdgpu_ctx *ctx;
struct dma_fence *fence;
int r;
- r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
- user->ring, &ring);
- if (r)
- return ERR_PTR(r);
-
ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
if (ctx == NULL)
return ERR_PTR(-EINVAL);
- fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
+ r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
+ user->ring, &entity);
+ if (r) {
+ amdgpu_ctx_put(ctx);
+ return ERR_PTR(r);
+ }
+
+ fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
amdgpu_ctx_put(ctx);
return fence;
}
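+
+/* Turn a submission fence into a syncobj handle, syncobj fd or sync_file fd,
+ * depending on what userspace asked for.
+ */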
+int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ union drm_amdgpu_fence_to_handle *info = data;
+ struct dma_fence *fence;
+ struct drm_syncobj *syncobj;
+ struct sync_file *sync_file;
+ int fd, r;
+
+ fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+
+ if (!fence)
+ fence = dma_fence_get_stub();
+
+ switch (info->in.what) {
+ case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
+ r = drm_syncobj_create(&syncobj, 0, fence);
+ dma_fence_put(fence);
+ if (r)
+ return r;
+ r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
+ drm_syncobj_put(syncobj);
+ return r;
+
+ case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
+ r = drm_syncobj_create(&syncobj, 0, fence);
+ dma_fence_put(fence);
+ if (r)
+ return r;
+ r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
+ drm_syncobj_put(syncobj);
+ return r;
+
+ case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ dma_fence_put(fence);
+ return fd;
+ }
+
+ sync_file = sync_file_create(fence);
+ dma_fence_put(fence);
+ if (!sync_file) {
+ put_unused_fd(fd);
+ return -ENOMEM;
+ }
+
+ fd_install(fd, sync_file->file);
+ info->out.handle = fd;
+ return 0;
+
+ default:
+ dma_fence_put(fence);
+ return -EINVAL;
+ }
+}
+
/**
- * amdgpu_cs_wait_all_fence - wait on all fences to signal
+ * amdgpu_cs_wait_all_fences - wait on all fences to signal
*
* @adev: amdgpu device
* @filp: file private
@@ -1246,6 +1641,9 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
continue;
r = dma_fence_wait_timeout(fence, true, timeout);
+ if (r > 0 && fence->error)
+ r = fence->error;
+
dma_fence_put(fence);
if (r < 0)
return r;
@@ -1297,6 +1695,7 @@ static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
array[i] = fence;
} else { /* NULL, the fence has been already signaled */
r = 1;
+ first = i;
goto out;
}
}
@@ -1310,8 +1709,11 @@ out:
memset(wait, 0, sizeof(*wait));
wait->out.status = (r > 0);
wait->out.first_signaled = first;
- /* set return value 0 to indicate success */
- r = 0;
+
+ if (first < fence_count && array[first])
+ r = array[first]->error;
+ else
+ r = 0;
err_free_fence_array:
for (i = 0; i < fence_count; i++)
@@ -1331,120 +1733,75 @@ err_free_fence_array:
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
union drm_amdgpu_wait_fences *wait = data;
- uint32_t fence_count = wait->in.fence_count;
- struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences;
int r;
/* Get the fences from userspace */
- fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
- GFP_KERNEL);
- if (fences == NULL)
- return -ENOMEM;
-
- fences_user = (void __user *)(uintptr_t)(wait->in.fences);
- if (copy_from_user(fences, fences_user,
- sizeof(struct drm_amdgpu_fence) * fence_count)) {
- r = -EFAULT;
- goto err_free_fences;
- }
+ fences = memdup_array_user(u64_to_user_ptr(wait->in.fences),
+ wait->in.fence_count,
+ sizeof(struct drm_amdgpu_fence));
+ if (IS_ERR(fences))
+ return PTR_ERR(fences);
if (wait->in.wait_all)
r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
else
r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);
-err_free_fences:
kfree(fences);
return r;
}
/**
- * amdgpu_cs_find_bo_va - find bo_va for VM address
+ * amdgpu_cs_find_mapping - find bo_va for VM address
*
* @parser: command submission parser context
* @addr: VM address
* @bo: resulting BO of the mapping found
+ * @map: Placeholder to return found BO mapping
*
* Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns 0 and fills in @bo and @map when a
 * mapping is found, a negative error code otherwise.
*/
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
- uint64_t addr, struct amdgpu_bo **bo)
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ uint64_t addr, struct amdgpu_bo **bo,
+ struct amdgpu_bo_va_mapping **map)
{
+ struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping;
- unsigned i;
-
- if (!parser->bo_list)
- return NULL;
+ int i, r;
addr /= AMDGPU_GPU_PAGE_SIZE;
- for (i = 0; i < parser->bo_list->num_entries; i++) {
- struct amdgpu_bo_list_entry *lobj;
-
- lobj = &parser->bo_list->array[i];
- if (!lobj->bo_va)
- continue;
-
- list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
- if (mapping->start > addr ||
- addr > mapping->last)
- continue;
-
- *bo = lobj->bo_va->bo;
- return mapping;
- }
-
- list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
- if (mapping->start > addr ||
- addr > mapping->last)
- continue;
-
- *bo = lobj->bo_va->bo;
- return mapping;
- }
- }
-
- return NULL;
-}
-
-/**
- * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
- *
- * @parser: command submission parser context
- *
- * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
- */
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
-{
- unsigned i;
- int r;
-
- if (!parser->bo_list)
- return 0;
+ mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
+ if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
+ return -EINVAL;
- for (i = 0; i < parser->bo_list->num_entries; i++) {
- struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
+ *bo = mapping->bo_va->base.bo;
+ *map = mapping;
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
- if (unlikely(r))
- return r;
+ /* Double check that the BO is reserved by this CS */
+ if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
+ return -EINVAL;
- if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
- continue;
+ /* Make sure VRAM is allocated contiguously */
+ (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ if ((*bo)->tbo.resource->mem_type == TTM_PL_VRAM &&
+ !((*bo)->tbo.resource->placement & TTM_PL_FLAG_CONTIGUOUS)) {
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (unlikely(r))
+ amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
+ for (i = 0; i < (*bo)->placement.num_placement; i++)
+ (*bo)->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
+ r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
+ if (r)
return r;
}
- return 0;
+ return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
new file mode 100644
index 000000000000..39c33ad100cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_CS_H__
+#define __AMDGPU_CS_H__
+
+#include <linux/ww_mutex.h>
+#include <drm/drm_exec.h>
+
+#include "amdgpu_job.h"
+#include "amdgpu_bo_list.h"
+#include "amdgpu_ring.h"
+
+#define AMDGPU_CS_GANG_SIZE 4
+
+struct amdgpu_bo_va_mapping;
+
+struct amdgpu_cs_chunk {
+ uint32_t chunk_id;
+ uint32_t length_dw;
+ void *kdata;
+};
+
+struct amdgpu_cs_post_dep {
+ struct drm_syncobj *syncobj;
+ struct dma_fence_chain *chain;
+ u64 point;
+};
+
+struct amdgpu_cs_parser {
+ struct amdgpu_device *adev;
+ struct drm_file *filp;
+ struct amdgpu_ctx *ctx;
+
+ /* chunks */
+ unsigned nchunks;
+ struct amdgpu_cs_chunk *chunks;
+
+ /* scheduler job objects */
+ unsigned int gang_size;
+ unsigned int gang_leader_idx;
+ struct drm_sched_entity *entities[AMDGPU_CS_GANG_SIZE];
+ struct amdgpu_job *jobs[AMDGPU_CS_GANG_SIZE];
+ struct amdgpu_job *gang_leader;
+
+ /* buffer objects */
+ struct drm_exec exec;
+ struct amdgpu_bo_list *bo_list;
+ struct amdgpu_mn *mn;
+ struct dma_fence *fence;
+ uint64_t bytes_moved_threshold;
+ uint64_t bytes_moved_vis_threshold;
+ uint64_t bytes_moved;
+ uint64_t bytes_moved_vis;
+
+ /* user fence */
+ struct amdgpu_bo *uf_bo;
+
+ unsigned num_post_deps;
+ struct amdgpu_cs_post_dep *post_deps;
+
+ struct amdgpu_sync sync;
+};
+
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ uint64_t addr, struct amdgpu_bo **bo,
+ struct amdgpu_bo_va_mapping **mapping);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
new file mode 100644
index 000000000000..02138aa55793
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+
+#include <drm/drm_exec.h>
+
+#include "amdgpu.h"
+
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
+{
+ uint64_t addr = AMDGPU_VA_RESERVED_CSA_START(adev);
+
+ addr = amdgpu_gmc_sign_extend(addr);
+
+ return addr;
+}
+
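+/* Allocate the static CSA as a pinned kernel BO, map it for CPU access and
+ * clear it.
+ */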
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+ u32 domain, uint32_t size)
+{
+ void *ptr;
+
+ amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+ domain, bo,
+ NULL, &ptr);
+ if (!*bo)
+ return -ENOMEM;
+
+ memset(ptr, 0, size);
+ adev->virt.csa_cpu_addr = ptr;
+ return 0;
+}
+
+void amdgpu_free_static_csa(struct amdgpu_bo **bo)
+{
+ amdgpu_bo_free_kernel(bo, NULL, NULL);
+}
+
+/*
+ * amdgpu_map_static_csa should be called during amdgpu_vm_init().
+ * It maps the virtual address amdgpu_csa_vaddr() into this VM, and each GFX
+ * command submission should use this virtual address within the META_DATA
+ * init package to support SRIOV gfx preemption.
+ */
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+ uint64_t csa_addr, uint32_t size)
+{
+ struct drm_exec exec;
+ int r;
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ if (likely(!r))
+ r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r)) {
+ DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+ goto error;
+ }
+ }
+
+ *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
+ if (!*bo_va) {
+ r = -ENOMEM;
+ goto error;
+ }
+
+ r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
+ AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
+ AMDGPU_PTE_EXECUTABLE);
+
+ if (r) {
+ DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
+ amdgpu_vm_bo_del(adev, *bo_va);
+ goto error;
+ }
+
+error:
+ drm_exec_fini(&exec);
+ return r;
+}
+
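+/* Undo amdgpu_map_static_csa(): unmap the static CSA from the VM and drop
+ * its bo_va.
+ */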
+int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
+ uint64_t csa_addr)
+{
+ struct drm_exec exec;
+ int r;
+
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ if (likely(!r))
+ r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r)) {
+ DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
+ goto error;
+ }
+ }
+
+ r = amdgpu_vm_bo_unmap(adev, bo_va, csa_addr);
+ if (r) {
+ DRM_ERROR("failed to do bo_unmap on static CSA, err=%d\n", r);
+ goto error;
+ }
+
+ amdgpu_vm_bo_del(adev, bo_va);
+
+error:
+ drm_exec_fini(&exec);
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
new file mode 100644
index 000000000000..7dfc1f2012eb
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Monk.liu@amd.com
+ */
+
+#ifndef AMDGPU_CSA_MANAGER_H
+#define AMDGPU_CSA_MANAGER_H
+
+#define AMDGPU_CSA_SIZE (128 * 1024)
+
+uint32_t amdgpu_get_total_csa_size(struct amdgpu_device *adev);
+uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev);
+int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
+ u32 domain, uint32_t size);
+int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+ uint64_t csa_addr, uint32_t size);
+int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
+ uint64_t csa_addr);
+void amdgpu_free_static_csa(struct amdgpu_bo **bo);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 90d1ac8a80f8..f5d5c45ddc0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -22,74 +22,458 @@
* Authors: monk liu <monk.liu@amd.com>
*/
-#include <drm/drmP.h>
+#include <drm/drm_auth.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
+#include "amdgpu_sched.h"
+#include "amdgpu_ras.h"
+#include <linux/nospec.h>
+
+#define to_amdgpu_ctx_entity(e) \
+ container_of((e), struct amdgpu_ctx_entity, entity)
+
+const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
+ [AMDGPU_HW_IP_GFX] = 1,
+ [AMDGPU_HW_IP_COMPUTE] = 4,
+ [AMDGPU_HW_IP_DMA] = 2,
+ [AMDGPU_HW_IP_UVD] = 1,
+ [AMDGPU_HW_IP_VCE] = 1,
+ [AMDGPU_HW_IP_UVD_ENC] = 1,
+ [AMDGPU_HW_IP_VCN_DEC] = 1,
+ [AMDGPU_HW_IP_VCN_ENC] = 1,
+ [AMDGPU_HW_IP_VCN_JPEG] = 1,
+ [AMDGPU_HW_IP_VPE] = 1,
+};
+
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
+{
+ switch (ctx_prio) {
+ case AMDGPU_CTX_PRIORITY_VERY_LOW:
+ case AMDGPU_CTX_PRIORITY_LOW:
+ case AMDGPU_CTX_PRIORITY_NORMAL:
+ case AMDGPU_CTX_PRIORITY_HIGH:
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+ return true;
+ default:
+ case AMDGPU_CTX_PRIORITY_UNSET:
+ /* UNSET priority is not valid and we don't carry that
+ * around, but set it to NORMAL in the only place this
+ * function is called, amdgpu_ctx_ioctl().
+ */
+ return false;
+ }
+}
-static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
+static enum drm_sched_priority
+amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{
- unsigned i, j;
+ switch (ctx_prio) {
+ case AMDGPU_CTX_PRIORITY_UNSET:
+ pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL");
+ return DRM_SCHED_PRIORITY_NORMAL;
+
+ case AMDGPU_CTX_PRIORITY_VERY_LOW:
+ return DRM_SCHED_PRIORITY_LOW;
+
+ case AMDGPU_CTX_PRIORITY_LOW:
+ return DRM_SCHED_PRIORITY_LOW;
+
+ case AMDGPU_CTX_PRIORITY_NORMAL:
+ return DRM_SCHED_PRIORITY_NORMAL;
+
+ case AMDGPU_CTX_PRIORITY_HIGH:
+ return DRM_SCHED_PRIORITY_HIGH;
+
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+ return DRM_SCHED_PRIORITY_HIGH;
+
+ /* This should not happen as we already sanitized the userspace-provided
+ * priority; WARN if it does.
+ */
+ default:
+ WARN(1, "Invalid context priority %d\n", ctx_prio);
+ return DRM_SCHED_PRIORITY_NORMAL;
+ }
+
+}
+
+static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+ int32_t priority)
+{
+ /* NORMAL and below are accessible by everyone */
+ if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
+ return 0;
+
+ if (capable(CAP_SYS_NICE))
+ return 0;
+
+ if (drm_is_current_master(filp))
+ return 0;
+
+ return -EACCES;
+}
+
+static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_gfx_pipe_prio(int32_t prio)
+{
+ switch (prio) {
+ case AMDGPU_CTX_PRIORITY_HIGH:
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+ return AMDGPU_GFX_PIPE_PRIO_HIGH;
+ default:
+ return AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ }
+}
+
+static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
+{
+ switch (prio) {
+ case AMDGPU_CTX_PRIORITY_HIGH:
+ return AMDGPU_RING_PRIO_1;
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+ return AMDGPU_RING_PRIO_2;
+ default:
+ return AMDGPU_RING_PRIO_0;
+ }
+}
+
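+/* Map the context priority to a hardware queue priority for the given IP,
+ * falling back to the default priority if no scheduler is set up for it.
+ */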
+static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
+{
+ struct amdgpu_device *adev = ctx->mgr->adev;
+ unsigned int hw_prio;
+ int32_t ctx_prio;
+
+ ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+ ctx->init_priority : ctx->override_priority;
+
+ switch (hw_ip) {
+ case AMDGPU_HW_IP_GFX:
+ case AMDGPU_HW_IP_COMPUTE:
+ hw_prio = amdgpu_ctx_prio_to_gfx_pipe_prio(ctx_prio);
+ break;
+ case AMDGPU_HW_IP_VCE:
+ case AMDGPU_HW_IP_VCN_ENC:
+ hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
+ break;
+ default:
+ hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+ break;
+ }
+
+ hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
+ if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
+ hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+
+ return hw_prio;
+}
+
+/* Calculate the time spent on the hw */
+static ktime_t amdgpu_ctx_fence_time(struct dma_fence *fence)
+{
+ struct drm_sched_fence *s_fence;
+
+ if (!fence)
+ return ns_to_ktime(0);
+
+ /* When the fence is not even scheduled it can't have spent any time */
+ s_fence = to_drm_sched_fence(fence);
+ if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags))
+ return ns_to_ktime(0);
+
+ /* When it is still running, account how much time was already spent */
+ if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags))
+ return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);
+
+ return ktime_sub(s_fence->finished.timestamp,
+ s_fence->scheduled.timestamp);
+}
+
+static ktime_t amdgpu_ctx_entity_time(struct amdgpu_ctx *ctx,
+ struct amdgpu_ctx_entity *centity)
+{
+ ktime_t res = ns_to_ktime(0);
+ uint32_t i;
+
+ spin_lock(&ctx->ring_lock);
+ for (i = 0; i < amdgpu_sched_jobs; i++) {
+ res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));
+ }
+ spin_unlock(&ctx->ring_lock);
+ return res;
+}
+
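+/* Create the scheduler entity for a (hw_ip, ring) slot on demand; if another
+ * thread installed an entity concurrently, the new one is simply dropped.
+ */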
+static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
+ const u32 ring)
+{
+ struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
+ struct amdgpu_device *adev = ctx->mgr->adev;
+ struct amdgpu_ctx_entity *entity;
+ enum drm_sched_priority drm_prio;
+ unsigned int hw_prio, num_scheds;
+ int32_t ctx_prio;
+ int r;
+
+ entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
+ GFP_KERNEL);
+ if (!entity)
+ return -ENOMEM;
+
+ ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+ ctx->init_priority : ctx->override_priority;
+ entity->hw_ip = hw_ip;
+ entity->sequence = 1;
+ hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
+ drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);
+
+ hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
+
+ if (!adev->xcp_mgr) {
+ scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+ num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+ } else {
+ struct amdgpu_fpriv *fpriv;
+
+ fpriv = container_of(ctx->ctx_mgr, struct amdgpu_fpriv, ctx_mgr);
+ r = amdgpu_xcp_select_scheds(adev, hw_ip, hw_prio, fpriv,
+ &num_scheds, &scheds);
+ if (r)
+ goto cleanup_entity;
+ }
+
+ /* disable load balance if the hw engine retains context among dependent jobs */
+ if (hw_ip == AMDGPU_HW_IP_VCN_ENC ||
+ hw_ip == AMDGPU_HW_IP_VCN_DEC ||
+ hw_ip == AMDGPU_HW_IP_UVD_ENC ||
+ hw_ip == AMDGPU_HW_IP_UVD) {
+ sched = drm_sched_pick_best(scheds, num_scheds);
+ scheds = &sched;
+ num_scheds = 1;
+ }
+
+ r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
+ &ctx->guilty);
+ if (r)
+ goto error_free_entity;
+
+ /* It's not an error if we fail to install the new entity */
+ if (cmpxchg(&ctx->entities[hw_ip][ring], NULL, entity))
+ goto cleanup_entity;
+
+ return 0;
+
+cleanup_entity:
+ drm_sched_entity_fini(&entity->entity);
+
+error_free_entity:
+ kfree(entity);
+
+ return r;
+}
+
+static ktime_t amdgpu_ctx_fini_entity(struct amdgpu_device *adev,
+ struct amdgpu_ctx_entity *entity)
+{
+ ktime_t res = ns_to_ktime(0);
+ int i;
+
+ if (!entity)
+ return res;
+
+ for (i = 0; i < amdgpu_sched_jobs; ++i) {
+ res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i]));
+ dma_fence_put(entity->fences[i]);
+ }
+
+ amdgpu_xcp_release_sched(adev, entity);
+
+ kfree(entity);
+ return res;
+}
+
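+/* Translate the current DPM forced performance level back into the uAPI
+ * AMDGPU_CTX_STABLE_PSTATE_* value; levels without a stable-pstate
+ * equivalent are reported as AMDGPU_CTX_STABLE_PSTATE_NONE.
+ */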
+static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
+ u32 *stable_pstate)
+{
+ struct amdgpu_device *adev = ctx->mgr->adev;
+ enum amd_dpm_forced_level current_level;
+
+ current_level = amdgpu_dpm_get_performance_level(adev);
+
+ switch (current_level) {
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_STANDARD;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_PEAK;
+ break;
+ default:
+ *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
+ break;
+ }
+ return 0;
+}
+
+static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
+ struct drm_file *filp, struct amdgpu_ctx *ctx)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ u32 current_stable_pstate;
int r;
+ r = amdgpu_ctx_priority_permit(filp, priority);
+ if (r)
+ return r;
+
memset(ctx, 0, sizeof(*ctx));
- ctx->adev = adev;
+
kref_init(&ctx->refcount);
+ ctx->mgr = mgr;
spin_lock_init(&ctx->ring_lock);
- ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
- sizeof(struct dma_fence*), GFP_KERNEL);
- if (!ctx->fences)
- return -ENOMEM;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- ctx->rings[i].sequence = 1;
- ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
- }
+ ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
+ ctx->reset_counter_query = ctx->reset_counter;
+ ctx->generation = amdgpu_vm_generation(mgr->adev, &fpriv->vm);
+ ctx->init_priority = priority;
+ ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
+
+ r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
+ if (r)
+ return r;
- ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+ if (mgr->adev->pm.stable_pstate_ctx)
+ ctx->stable_pstate = mgr->adev->pm.stable_pstate_ctx->stable_pstate;
+ else
+ ctx->stable_pstate = current_stable_pstate;
- /* create context entity for each ring */
- for (i = 0; i < adev->num_rings; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- struct amd_sched_rq *rq;
+ ctx->ctx_mgr = &(fpriv->ctx_mgr);
+ return 0;
+}
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
- rq, amdgpu_sched_jobs);
- if (r)
- goto failed;
+static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
+ u32 stable_pstate)
+{
+ struct amdgpu_device *adev = ctx->mgr->adev;
+ enum amd_dpm_forced_level level;
+ u32 current_stable_pstate;
+ int r;
+
+ mutex_lock(&adev->pm.stable_pstate_ctx_lock);
+ if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) {
+ r = -EBUSY;
+ goto done;
}
- return 0;
+ r = amdgpu_ctx_get_stable_pstate(ctx, &current_stable_pstate);
+ if (r || (stable_pstate == current_stable_pstate))
+ goto done;
+
+ switch (stable_pstate) {
+ case AMDGPU_CTX_STABLE_PSTATE_NONE:
+ level = AMD_DPM_FORCED_LEVEL_AUTO;
+ break;
+ case AMDGPU_CTX_STABLE_PSTATE_STANDARD:
+ level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
+ break;
+ case AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK:
+ level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
+ break;
+ case AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK:
+ level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
+ break;
+ case AMDGPU_CTX_STABLE_PSTATE_PEAK:
+ level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+ break;
+ default:
+ r = -EINVAL;
+ goto done;
+ }
+
+ r = amdgpu_dpm_force_performance_level(adev, level);
+
+ if (level == AMD_DPM_FORCED_LEVEL_AUTO)
+ adev->pm.stable_pstate_ctx = NULL;
+ else
+ adev->pm.stable_pstate_ctx = ctx;
+done:
+ mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
-failed:
- for (j = 0; j < i; j++)
- amd_sched_entity_fini(&adev->rings[j]->sched,
- &ctx->rings[j].entity);
- kfree(ctx->fences);
- ctx->fences = NULL;
return r;
}
-static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
+static void amdgpu_ctx_fini(struct kref *ref)
{
- struct amdgpu_device *adev = ctx->adev;
- unsigned i, j;
+ struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
+ struct amdgpu_ctx_mgr *mgr = ctx->mgr;
+ struct amdgpu_device *adev = mgr->adev;
+ unsigned i, j, idx;
if (!adev)
return;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
- for (j = 0; j < amdgpu_sched_jobs; ++j)
- dma_fence_put(ctx->rings[i].fences[j]);
- kfree(ctx->fences);
- ctx->fences = NULL;
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) {
+ ktime_t spend;
- for (i = 0; i < adev->num_rings; i++)
- amd_sched_entity_fini(&adev->rings[i]->sched,
- &ctx->rings[i].entity);
+ spend = amdgpu_ctx_fini_entity(adev, ctx->entities[i][j]);
+ atomic64_add(ktime_to_ns(spend), &mgr->time_spend[i]);
+ }
+ }
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ amdgpu_ctx_set_stable_pstate(ctx, ctx->stable_pstate);
+ drm_dev_exit(idx);
+ }
+
+ kfree(ctx);
+}
+
+int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
+ u32 ring, struct drm_sched_entity **entity)
+{
+ int r;
+ struct drm_sched_entity *ctx_entity;
+
+ if (hw_ip >= AMDGPU_HW_IP_NUM) {
+ DRM_ERROR("unknown HW IP type: %d\n", hw_ip);
+ return -EINVAL;
+ }
+
+ /* Right now all IPs have only one instance - multiple rings. */
+ if (instance != 0) {
+ DRM_DEBUG("invalid ip instance: %d\n", instance);
+ return -EINVAL;
+ }
+
+ if (ring >= amdgpu_ctx_num_entities[hw_ip]) {
+ DRM_DEBUG("invalid ring: %d %d\n", hw_ip, ring);
+ return -EINVAL;
+ }
+
+ if (ctx->entities[hw_ip][ring] == NULL) {
+ r = amdgpu_ctx_init_entity(ctx, hw_ip, ring);
+ if (r)
+ return r;
+ }
+
+ ctx_entity = &ctx->entities[hw_ip][ring]->entity;
+ r = drm_sched_entity_error(ctx_entity);
+ if (r) {
+ DRM_DEBUG("error entity %p\n", ctx_entity);
+ return r;
+ }
+
+ *entity = ctx_entity;
+ return 0;
}
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
+ struct drm_file *filp,
+ int32_t priority,
uint32_t *id)
{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -101,14 +485,15 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
return -ENOMEM;
mutex_lock(&mgr->lock);
- r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
+ r = idr_alloc(&mgr->ctx_handles, ctx, 1, AMDGPU_VM_MAX_NUM_CTX, GFP_KERNEL);
if (r < 0) {
mutex_unlock(&mgr->lock);
kfree(ctx);
return r;
}
+
*id = (uint32_t)r;
- r = amdgpu_ctx_init(adev, ctx);
+ r = amdgpu_ctx_init(mgr, priority, filp, ctx);
if (r) {
idr_remove(&mgr->ctx_handles, *id);
*id = 0;
@@ -121,12 +506,19 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
static void amdgpu_ctx_do_release(struct kref *ref)
{
struct amdgpu_ctx *ctx;
+ u32 i, j;
ctx = container_of(ref, struct amdgpu_ctx, refcount);
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ if (!ctx->entities[i][j])
+ continue;
- amdgpu_ctx_fini(ctx);
+ drm_sched_entity_destroy(&ctx->entities[i][j]->entity);
+ }
+ }
- kfree(ctx);
+ amdgpu_ctx_fini(ref);
}
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
@@ -168,40 +560,166 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
 /* determine if a GPU reset has occurred since the last call */
reset_counter = atomic_read(&adev->gpu_reset_counter);
/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
- if (ctx->reset_counter == reset_counter)
+ if (ctx->reset_counter_query == reset_counter)
out->state.reset_status = AMDGPU_CTX_NO_RESET;
else
out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
- ctx->reset_counter = reset_counter;
+ ctx->reset_counter_query = reset_counter;
+
+ mutex_unlock(&mgr->lock);
+ return 0;
+}
+
+#define AMDGPU_RAS_COUNTE_DELAY_MS 3000
+
+static int amdgpu_ctx_query2(struct amdgpu_device *adev,
+ struct amdgpu_fpriv *fpriv, uint32_t id,
+ union drm_amdgpu_ctx_out *out)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ctx *ctx;
+ struct amdgpu_ctx_mgr *mgr;
+
+ if (!fpriv)
+ return -EINVAL;
+
+ mgr = &fpriv->ctx_mgr;
+ mutex_lock(&mgr->lock);
+ ctx = idr_find(&mgr->ctx_handles, id);
+ if (!ctx) {
+ mutex_unlock(&mgr->lock);
+ return -EINVAL;
+ }
+
+ out->state.flags = 0x0;
+ out->state.hangs = 0x0;
+
+ if (ctx->reset_counter != atomic_read(&adev->gpu_reset_counter))
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET;
+
+ if (ctx->generation != amdgpu_vm_generation(adev, &fpriv->vm))
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST;
+
+ if (atomic_read(&ctx->guilty))
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
+
+ if (amdgpu_in_reset(adev))
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS;
+
+ if (adev->ras_enabled && con) {
+ /* Return the cached values in O(1),
+ * and schedule delayed work to cache
+ * new values.
+ */
+ int ce_count, ue_count;
+
+ ce_count = atomic_read(&con->ras_ce_count);
+ ue_count = atomic_read(&con->ras_ue_count);
+
+ if (ce_count != ctx->ras_counter_ce) {
+ ctx->ras_counter_ce = ce_count;
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
+ }
+
+ if (ue_count != ctx->ras_counter_ue) {
+ ctx->ras_counter_ue = ue_count;
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
+ }
+
+ schedule_delayed_work(&con->ras_counte_delay_work,
+ msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS));
+ }
mutex_unlock(&mgr->lock);
return 0;
}
+static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
+ struct amdgpu_fpriv *fpriv, uint32_t id,
+ bool set, u32 *stable_pstate)
+{
+ struct amdgpu_ctx *ctx;
+ struct amdgpu_ctx_mgr *mgr;
+ int r;
+
+ if (!fpriv)
+ return -EINVAL;
+
+ mgr = &fpriv->ctx_mgr;
+ mutex_lock(&mgr->lock);
+ ctx = idr_find(&mgr->ctx_handles, id);
+ if (!ctx) {
+ mutex_unlock(&mgr->lock);
+ return -EINVAL;
+ }
+
+ if (set)
+ r = amdgpu_ctx_set_stable_pstate(ctx, *stable_pstate);
+ else
+ r = amdgpu_ctx_get_stable_pstate(ctx, stable_pstate);
+
+ mutex_unlock(&mgr->lock);
+ return r;
+}
+
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
int r;
- uint32_t id;
+ uint32_t id, stable_pstate;
+ int32_t priority;
union drm_amdgpu_ctx *args = data;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
- r = 0;
id = args->in.ctx_id;
+ priority = args->in.priority;
+
+ /* For backwards compatibility, we need to accept ioctls with garbage
+ * in the priority field. Garbage values in the priority field result
+ * in the priority being set to NORMAL.
+ */
+ if (!amdgpu_ctx_priority_is_valid(priority))
+ priority = AMDGPU_CTX_PRIORITY_NORMAL;
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
- r = amdgpu_ctx_alloc(adev, fpriv, &id);
+ if (args->in.flags)
+ return -EINVAL;
+ r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
args->out.alloc.ctx_id = id;
break;
case AMDGPU_CTX_OP_FREE_CTX:
+ if (args->in.flags)
+ return -EINVAL;
r = amdgpu_ctx_free(fpriv, id);
break;
case AMDGPU_CTX_OP_QUERY_STATE:
+ if (args->in.flags)
+ return -EINVAL;
r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
break;
+ case AMDGPU_CTX_OP_QUERY_STATE2:
+ if (args->in.flags)
+ return -EINVAL;
+ r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
+ break;
+ case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
+ if (args->in.flags)
+ return -EINVAL;
+ r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate);
+ if (!r)
+ args->out.pstate.flags = stable_pstate;
+ break;
+ case AMDGPU_CTX_OP_SET_STABLE_PSTATE:
+ if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK)
+ return -EINVAL;
+ stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK;
+ if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK)
+ return -EINVAL;
+ r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate);
+ break;
default:
return -EINVAL;
}
@@ -236,82 +754,239 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
return 0;
}
-uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
+ struct drm_sched_entity *entity,
struct dma_fence *fence)
{
- struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
- uint64_t seq = cring->sequence;
- unsigned idx = 0;
+ struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
+ uint64_t seq = centity->sequence;
struct dma_fence *other = NULL;
+ unsigned idx = 0;
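+
+ /* The fence slots form a ring buffer indexed by the sequence number;
+ * amdgpu_sched_jobs is expected to be a power of two, so the mask below
+ * wraps seq onto a slot whose previous occupant must already have
+ * signaled (see the WARN_ON below).
+ */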
idx = seq & (amdgpu_sched_jobs - 1);
- other = cring->fences[idx];
- if (other) {
- signed long r;
- r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
- if (r < 0)
- DRM_ERROR("Error (%ld) waiting for fence!\n", r);
- }
+ other = centity->fences[idx];
+ WARN_ON(other && !dma_fence_is_signaled(other));
dma_fence_get(fence);
spin_lock(&ctx->ring_lock);
- cring->fences[idx] = fence;
- cring->sequence++;
+ centity->fences[idx] = fence;
+ centity->sequence++;
spin_unlock(&ctx->ring_lock);
- dma_fence_put(other);
+ atomic64_add(ktime_to_ns(amdgpu_ctx_fence_time(other)),
+ &ctx->mgr->time_spend[centity->hw_ip]);
+ dma_fence_put(other);
return seq;
}
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
- struct amdgpu_ring *ring, uint64_t seq)
+ struct drm_sched_entity *entity,
+ uint64_t seq)
{
- struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
+ struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
struct dma_fence *fence;
spin_lock(&ctx->ring_lock);
if (seq == ~0ull)
- seq = ctx->rings[ring->idx].sequence - 1;
+ seq = centity->sequence - 1;
- if (seq >= cring->sequence) {
+ if (seq >= centity->sequence) {
spin_unlock(&ctx->ring_lock);
return ERR_PTR(-EINVAL);
}
- if (seq + amdgpu_sched_jobs < cring->sequence) {
+ if (seq + amdgpu_sched_jobs < centity->sequence) {
spin_unlock(&ctx->ring_lock);
return NULL;
}
- fence = dma_fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
+ fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
spin_unlock(&ctx->ring_lock);
return fence;
}
-void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
+static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
+ struct amdgpu_ctx_entity *aentity,
+ int hw_ip,
+ int32_t priority)
+{
+ struct amdgpu_device *adev = ctx->mgr->adev;
+ unsigned int hw_prio;
+ struct drm_gpu_scheduler **scheds = NULL;
+ unsigned num_scheds;
+
+ /* set sw priority */
+ drm_sched_entity_set_priority(&aentity->entity,
+ amdgpu_ctx_to_drm_sched_prio(priority));
+
+ /* set hw priority */
+ if (hw_ip == AMDGPU_HW_IP_COMPUTE || hw_ip == AMDGPU_HW_IP_GFX) {
+ hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
+ hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
+ scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+ num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+ drm_sched_entity_modify_sched(&aentity->entity, scheds,
+ num_scheds);
+ }
+}
+
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+ int32_t priority)
{
+ int32_t ctx_prio;
+ unsigned i, j;
+
+ ctx->override_priority = priority;
+
+ ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+ ctx->init_priority : ctx->override_priority;
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ if (!ctx->entities[i][j])
+ continue;
+
+ amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
+ i, ctx_prio);
+ }
+ }
+}
+
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
+ struct drm_sched_entity *entity)
+{
+ struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity);
+ struct dma_fence *other;
+ unsigned idx;
+ long r;
+
+ spin_lock(&ctx->ring_lock);
+ idx = centity->sequence & (amdgpu_sched_jobs - 1);
+ other = dma_fence_get(centity->fences[idx]);
+ spin_unlock(&ctx->ring_lock);
+
+ if (!other)
+ return 0;
+
+ r = dma_fence_wait(other, true);
+ if (r < 0 && r != -ERESTARTSYS)
+ DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+
+ dma_fence_put(other);
+ return r;
+}
+
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
+ struct amdgpu_device *adev)
+{
+ unsigned int i;
+
+ mgr->adev = adev;
mutex_init(&mgr->lock);
- idr_init(&mgr->ctx_handles);
+ idr_init_base(&mgr->ctx_handles, 1);
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
+ atomic64_set(&mgr->time_spend[i], 0);
}
-void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
+long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
struct amdgpu_ctx *ctx;
struct idr *idp;
- uint32_t id;
+ uint32_t id, i, j;
idp = &mgr->ctx_handles;
+ mutex_lock(&mgr->lock);
idr_for_each_entry(idp, ctx, id) {
- if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ struct drm_sched_entity *entity;
+
+ if (!ctx->entities[i][j])
+ continue;
+
+ entity = &ctx->entities[i][j]->entity;
+ timeout = drm_sched_entity_flush(entity, timeout);
+ }
+ }
+ }
+ mutex_unlock(&mgr->lock);
+ return timeout;
+}
+
+static void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
+{
+ struct amdgpu_ctx *ctx;
+ struct idr *idp;
+ uint32_t id, i, j;
+
+ idp = &mgr->ctx_handles;
+
+ idr_for_each_entry(idp, ctx, id) {
+ if (kref_read(&ctx->refcount) != 1) {
DRM_ERROR("ctx %p is still alive\n", ctx);
+ continue;
+ }
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
+ for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
+ struct drm_sched_entity *entity;
+
+ if (!ctx->entities[i][j])
+ continue;
+
+ entity = &ctx->entities[i][j]->entity;
+ drm_sched_entity_fini(entity);
+ }
+ }
+ kref_put(&ctx->refcount, amdgpu_ctx_fini);
}
+}
+void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
+{
+ amdgpu_ctx_mgr_entity_fini(mgr);
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}
+
+void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
+ ktime_t usage[AMDGPU_HW_IP_NUM])
+{
+ struct amdgpu_ctx *ctx;
+ unsigned int hw_ip, i;
+ uint32_t id;
+
+ /*
+ * This is a little bit racy because a ctx or a fence can be destroyed
+ * right at the moment we try to account them. But that is ok since
+ * exactly that case is explicitly allowed by the interface.
+ */
+ mutex_lock(&mgr->lock);
+ for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
+ uint64_t ns = atomic64_read(&mgr->time_spend[hw_ip]);
+
+ usage[hw_ip] = ns_to_ktime(ns);
+ }
+
+ idr_for_each_entry(&mgr->ctx_handles, ctx, id) {
+ for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
+ for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i) {
+ struct amdgpu_ctx_entity *centity;
+ ktime_t spend;
+
+ centity = ctx->entities[hw_ip][i];
+ if (!centity)
+ continue;
+ spend = amdgpu_ctx_entity_time(ctx, centity);
+ usage[hw_ip] = ktime_add(usage[hw_ip], spend);
+ }
+ }
+ }
+ mutex_unlock(&mgr->lock);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
new file mode 100644
index 000000000000..090dfe86f75b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_CTX_H__
+#define __AMDGPU_CTX_H__
+
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+#include "amdgpu_ring.h"
+
+struct drm_device;
+struct drm_file;
+struct amdgpu_fpriv;
+struct amdgpu_ctx_mgr;
+
+#define AMDGPU_MAX_ENTITY_NUM 4
+
+struct amdgpu_ctx_entity {
+ uint32_t hw_ip;
+ uint64_t sequence;
+ struct drm_sched_entity entity;
+ struct dma_fence *fences[];
+};
+
+struct amdgpu_ctx {
+ struct kref refcount;
+ struct amdgpu_ctx_mgr *mgr;
+ unsigned reset_counter;
+ unsigned reset_counter_query;
+ uint64_t generation;
+ spinlock_t ring_lock;
+ struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
+ bool preamble_presented;
+ int32_t init_priority;
+ int32_t override_priority;
+ atomic_t guilty;
+ unsigned long ras_counter_ce;
+ unsigned long ras_counter_ue;
+ uint32_t stable_pstate;
+ struct amdgpu_ctx_mgr *ctx_mgr;
+};
+
+struct amdgpu_ctx_mgr {
+ struct amdgpu_device *adev;
+ struct mutex lock;
+ /* protected by lock */
+ struct idr ctx_handles;
+ atomic64_t time_spend[AMDGPU_HW_IP_NUM];
+};
+
+extern const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM];
+
+struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
+int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
+
+int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
+ u32 ring, struct drm_sched_entity **entity);
+uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
+ struct drm_sched_entity *entity,
+ struct dma_fence *fence);
+struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
+ struct drm_sched_entity *entity,
+ uint64_t seq);
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio);
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, int32_t ctx_prio);
+
+int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
+ struct drm_sched_entity *entity);
+
+void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr,
+ struct amdgpu_device *adev);
+long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
+void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
+void amdgpu_ctx_mgr_usage(struct amdgpu_ctx_mgr *mgr,
+ ktime_t usage[AMDGPU_HW_IP_NUM]);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
new file mode 100644
index 000000000000..a70651050acf
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -0,0 +1,2201 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_dm_debugfs.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_rap.h"
+#include "amdgpu_securedisplay.h"
+#include "amdgpu_fw_attestation.h"
+#include "amdgpu_umr.h"
+
+#include "amdgpu_reset.h"
+#include "amdgpu_psp_ta.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
+ *
+ * @read: True if reading
+ * @f: open file handle
+ * @buf: User buffer to write/read to
+ * @size: Number of bytes to write/read
+ * @pos: Offset to seek to
+ *
+ * For this debugfs entry, the offset being sought has a special meaning.
+ * Various bits have different meanings:
+ *
+ * Bit 62: Indicates a GRBM bank switch is needed
+ * Bit 61: Indicates an SRBM bank switch is needed (implies bit 62 is
+ * zero)
+ * Bits 24..33: The SE or ME selector if needed
+ * Bits 34..43: The SH (or SA) or PIPE selector if needed
+ * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed
+ *
+ * Bit 23: Indicates that the PM power gating lock should be held
+ * This is necessary to read registers that might be
+ * unreliable during a power gating transition.
+ *
+ * The lower bits are the BYTE offset of the register to read. This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
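+ *
+ * For example (illustrative values): to read the register at byte offset
+ * 0x1234 on SE 1 / SA 0 / instance 0 via a GRBM bank switch, userspace
+ * would seek to (1ULL << 62) | (1ULL << 24) | 0x1234 before reading.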
+ */
+static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
+ char __user *buf, size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+ bool pm_pg_lock, use_bank, use_ring;
+ unsigned int instance_bank, sh_bank, se_bank, me, pipe, queue, vmid;
+
+ pm_pg_lock = use_bank = use_ring = false;
+ instance_bank = sh_bank = se_bank = me = pipe = queue = vmid = 0;
+
+ if (size & 0x3 || *pos & 0x3 ||
+ ((*pos & (1ULL << 62)) && (*pos & (1ULL << 61))))
+ return -EINVAL;
+
+ /* are we reading registers for which a PG lock is necessary? */
+ pm_pg_lock = (*pos >> 23) & 1;
+
+ if (*pos & (1ULL << 62)) {
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
+
+ if (se_bank == 0x3FF)
+ se_bank = 0xFFFFFFFF;
+ if (sh_bank == 0x3FF)
+ sh_bank = 0xFFFFFFFF;
+ if (instance_bank == 0x3FF)
+ instance_bank = 0xFFFFFFFF;
+ use_bank = true;
+ } else if (*pos & (1ULL << 61)) {
+
+ me = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ pipe = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ queue = (*pos & GENMASK_ULL(53, 44)) >> 44;
+ vmid = (*pos & GENMASK_ULL(58, 54)) >> 54;
+
+ use_ring = true;
+ } else {
+ use_bank = use_ring = false;
+ }
+
+ *pos &= (1UL << 22) - 1;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ if (use_bank) {
+ if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
+ (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
+ return -EINVAL;
+ }
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se_bank,
+ sh_bank, instance_bank, 0);
+ } else if (use_ring) {
+ mutex_lock(&adev->srbm_mutex);
+ amdgpu_gfx_select_me_pipe_q(adev, me, pipe, queue, vmid, 0);
+ }
+
+ if (pm_pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
+ while (size) {
+ uint32_t value;
+
+ if (read) {
+ value = RREG32(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ } else {
+ r = get_user(value, (uint32_t *)buf);
+ if (!r)
+ amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
+ }
+ if (r) {
+ result = r;
+ goto end;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+end:
+ if (use_bank) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ } else if (use_ring) {
+ amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
+ if (pm_pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ amdgpu_virt_disable_access_debugfs(adev);
+ return result;
+}
+
+/*
+ * amdgpu_debugfs_regs_read - Callback for reading MMIO registers
+ */
+static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos);
+}
+
+/*
+ * amdgpu_debugfs_regs_write - Callback for writing MMIO registers
+ */
+static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
+}
+
+static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
+{
+ struct amdgpu_debugfs_regs2_data *rd;
+
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+ rd->adev = file_inode(file)->i_private;
+ file->private_data = rd;
+ mutex_init(&rd->lock);
+
+ return 0;
+}
+
+static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file)
+{
+ struct amdgpu_debugfs_regs2_data *rd = file->private_data;
+
+ mutex_destroy(&rd->lock);
+ kfree(file->private_data);
+ return 0;
+}
+
+static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en)
+{
+ struct amdgpu_debugfs_regs2_data *rd = f->private_data;
+ struct amdgpu_device *adev = rd->adev;
+ ssize_t result = 0;
+ int r;
+ uint32_t value;
+
+ if (size & 0x3 || offset & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ mutex_lock(&rd->lock);
+
+ if (rd->id.use_grbm) {
+ if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
+ (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
+ mutex_unlock(&rd->lock);
+ return -EINVAL;
+ }
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se,
+ rd->id.grbm.sh,
+ rd->id.grbm.instance, rd->id.xcc_id);
+ }
+
+ if (rd->id.use_srbm) {
+ mutex_lock(&adev->srbm_mutex);
+ amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe,
+ rd->id.srbm.queue, rd->id.srbm.vmid, rd->id.xcc_id);
+ }
+
+ if (rd->id.pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
+ while (size) {
+ if (!write_en) {
+ value = RREG32(offset >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ } else {
+ r = get_user(value, (uint32_t *)buf);
+ if (!r)
+ amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value, rd->id.xcc_id);
+ }
+ if (r) {
+ result = r;
+ goto end;
+ }
+ offset += 4;
+ size -= 4;
+ result += 4;
+ buf += 4;
+ }
+end:
+ if (rd->id.use_grbm) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, rd->id.xcc_id);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (rd->id.use_srbm) {
+ amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, rd->id.xcc_id);
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
+ if (rd->id.pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
+ mutex_unlock(&rd->lock);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ amdgpu_virt_disable_access_debugfs(adev);
+ return result;
+}
+
+static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data)
+{
+ struct amdgpu_debugfs_regs2_data *rd = f->private_data;
+ struct amdgpu_debugfs_regs2_iocdata v1_data;
+ int r;
+
+ mutex_lock(&rd->lock);
+
+ switch (cmd) {
+ case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2:
+ r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata_v2 *)data,
+ sizeof(rd->id));
+ if (r)
+ r = -EINVAL;
+ goto done;
+ case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE:
+ r = copy_from_user(&v1_data, (struct amdgpu_debugfs_regs2_iocdata *)data,
+ sizeof(v1_data));
+ if (r) {
+ r = -EINVAL;
+ goto done;
+ }
+ goto v1_copy;
+ default:
+ r = -EINVAL;
+ goto done;
+ }
+
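+ /* Legacy v1 state carries no XCC selector, so it defaults to instance 0. */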
+v1_copy:
+ rd->id.use_srbm = v1_data.use_srbm;
+ rd->id.use_grbm = v1_data.use_grbm;
+ rd->id.pg_lock = v1_data.pg_lock;
+ rd->id.grbm.se = v1_data.grbm.se;
+ rd->id.grbm.sh = v1_data.grbm.sh;
+ rd->id.grbm.instance = v1_data.grbm.instance;
+ rd->id.srbm.me = v1_data.srbm.me;
+ rd->id.srbm.pipe = v1_data.srbm.pipe;
+ rd->id.srbm.queue = v1_data.srbm.queue;
+ rd->id.xcc_id = 0;
+done:
+ mutex_unlock(&rd->lock);
+ return r;
+}
+
+static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
+{
+ return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0);
+}
+
+static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos)
+{
+ return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1);
+}
+
+static int amdgpu_debugfs_gprwave_open(struct inode *inode, struct file *file)
+{
+ struct amdgpu_debugfs_gprwave_data *rd;
+
+ rd = kzalloc(sizeof(*rd), GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+ rd->adev = file_inode(file)->i_private;
+ file->private_data = rd;
+ mutex_init(&rd->lock);
+
+ return 0;
+}
+
+static int amdgpu_debugfs_gprwave_release(struct inode *inode, struct file *file)
+{
+ struct amdgpu_debugfs_gprwave_data *rd = file->private_data;
+
+ mutex_destroy(&rd->lock);
+ kfree(file->private_data);
+ return 0;
+}
+
+static ssize_t amdgpu_debugfs_gprwave_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
+{
+ struct amdgpu_debugfs_gprwave_data *rd = f->private_data;
+ struct amdgpu_device *adev = rd->adev;
+ ssize_t result = 0;
+ int r;
+ uint32_t *data, x;
+
+ if (size > 4096 || size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
+ return -ENOMEM;
+ }
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, rd->id.se, rd->id.sh, rd->id.cu, rd->id.xcc_id);
+
+ if (!rd->id.gpr_or_wave) {
+ x = 0;
+ if (adev->gfx.funcs->read_wave_data)
+ adev->gfx.funcs->read_wave_data(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, data, &x);
+ } else {
+ x = size >> 2;
+ if (rd->id.gpr.vpgr_or_sgpr) {
+ if (adev->gfx.funcs->read_wave_vgprs)
+ adev->gfx.funcs->read_wave_vgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, rd->id.gpr.thread, *pos, size>>2, data);
+ } else {
+ if (adev->gfx.funcs->read_wave_sgprs)
+ adev->gfx.funcs->read_wave_sgprs(adev, rd->id.xcc_id, rd->id.simd, rd->id.wave, *pos, size>>2, data);
+ }
+ }
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, rd->id.xcc_id);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ if (!x) {
+ result = -EINVAL;
+ goto done;
+ }
+
+ while (size && (*pos < x * 4)) {
+ uint32_t value;
+
+ value = data[*pos >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto done;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+done:
+ amdgpu_virt_disable_access_debugfs(adev);
+ kfree(data);
+ return result;
+}
+
+static long amdgpu_debugfs_gprwave_ioctl(struct file *f, unsigned int cmd, unsigned long data)
+{
+ struct amdgpu_debugfs_gprwave_data *rd = f->private_data;
+ int r = 0;
+
+ mutex_lock(&rd->lock);
+
+ switch (cmd) {
+ case AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE:
+ if (copy_from_user(&rd->id,
+ (struct amdgpu_debugfs_gprwave_iocdata *)data,
+ sizeof(rd->id)))
+ r = -EFAULT;
+ goto done;
+ default:
+ r = -EINVAL;
+ goto done;
+ }
+
+done:
+ mutex_unlock(&rd->lock);
+ return r;
+}
+
+
+
+
+/**
+ * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to read. This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
+static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ uint32_t value;
+
+ if (upper_32_bits(*pos))
+ value = RREG32_PCIE_EXT(*pos);
+ else
+ value = RREG32_PCIE(*pos);
+
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ goto out;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
+ return r;
+}
+
+/**
+ * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to write. This
+ * allows writing multiple registers in a single call and having
+ * the returned size reflect that.
+ */
+static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ goto out;
+
+ if (upper_32_bits(*pos))
+ WREG32_PCIE_EXT(*pos, value);
+ else
+ WREG32_PCIE(*pos, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
+ return r;
+}
+
+/**
+ * amdgpu_debugfs_regs_didt_read - Read from a DIDT register
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to read. This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
+static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ if (!adev->didt_rreg)
+ return -EOPNOTSUPP;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_DIDT(*pos >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ goto out;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
+ return r;
+}
+
+/**
+ * amdgpu_debugfs_regs_didt_write - Write to a DIDT register
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to write. This
+ * allows writing multiple registers in a single call and having
+ * the returned size reflect that.
+ */
+static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ if (!adev->didt_wreg)
+ return -EOPNOTSUPP;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ goto out;
+
+ WREG32_DIDT(*pos >> 2, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
+ return r;
+}
+
+/**
+ * amdgpu_debugfs_regs_smc_read - Read from a SMC register
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to read. This
+ * allows reading multiple registers in a single call and having
+ * the returned size reflect that.
+ */
+static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (!adev->smc_rreg)
+ return -EOPNOTSUPP;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ uint32_t value;
+
+ value = RREG32_SMC(*pos);
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ goto out;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
+ return r;
+}
+
+/**
+ * amdgpu_debugfs_regs_smc_write - Write to a SMC register
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * The lower bits are the BYTE offset of the register to write. This
+ * allows writing multiple registers in a single call and having
+ * the returned size reflect that.
+ */
+static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (!adev->smc_wreg)
+ return -EOPNOTSUPP;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ goto out;
+
+ WREG32_SMC(*pos, value);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
+ return r;
+}
+
+/**
+ * amdgpu_debugfs_gca_config_read - Read from gfx config data
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * This file is used to access configuration data in a somewhat
+ * stable fashion. The format is a series of DWORDs with the first
+ * indicating which revision it is. New content is appended to the
+ * end so that older software can still read the data.
+ */
+
+static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+ uint32_t *config, no_regs = 0;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
+ if (!config)
+ return -ENOMEM;
+
+ /* version, increment each time something is added */
+ config[no_regs++] = 5;
+ config[no_regs++] = adev->gfx.config.max_shader_engines;
+ config[no_regs++] = adev->gfx.config.max_tile_pipes;
+ config[no_regs++] = adev->gfx.config.max_cu_per_sh;
+ config[no_regs++] = adev->gfx.config.max_sh_per_se;
+ config[no_regs++] = adev->gfx.config.max_backends_per_se;
+ config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
+ config[no_regs++] = adev->gfx.config.max_gprs;
+ config[no_regs++] = adev->gfx.config.max_gs_threads;
+ config[no_regs++] = adev->gfx.config.max_hw_contexts;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
+ config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
+ config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
+ config[no_regs++] = adev->gfx.config.num_tile_pipes;
+ config[no_regs++] = adev->gfx.config.backend_enable_mask;
+ config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
+ config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
+ config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
+ config[no_regs++] = adev->gfx.config.num_gpus;
+ config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
+ config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
+ config[no_regs++] = adev->gfx.config.gb_addr_config;
+ config[no_regs++] = adev->gfx.config.num_rbs;
+
+ /* rev==1 */
+ config[no_regs++] = adev->rev_id;
+ config[no_regs++] = adev->pg_flags;
+ config[no_regs++] = lower_32_bits(adev->cg_flags);
+
+ /* rev==2 */
+ config[no_regs++] = adev->family;
+ config[no_regs++] = adev->external_rev_id;
+
+ /* rev==3 */
+ config[no_regs++] = adev->pdev->device;
+ config[no_regs++] = adev->pdev->revision;
+ config[no_regs++] = adev->pdev->subsystem_device;
+ config[no_regs++] = adev->pdev->subsystem_vendor;
+
+ /* rev==4 APU flag */
+ config[no_regs++] = adev->flags & AMD_IS_APU ? 1 : 0;
+
+ /* rev==5 PG/CG flag upper 32bit */
+ config[no_regs++] = 0;
+ config[no_regs++] = upper_32_bits(adev->cg_flags);
+
+ while (size && (*pos < no_regs * 4)) {
+ uint32_t value;
+
+ value = config[*pos >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ kfree(config);
+ return r;
+ }
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ kfree(config);
+ return result;
+}
+
+/**
+ * amdgpu_debugfs_sensor_read - Read from the powerplay sensors
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The offset is treated as the BYTE address of one of the sensors
+ * enumerated in amd/include/kgd_pp_interface.h under the
+ * 'amd_pp_sensors' enumeration. For instance, to read the UVD VCLK
+ * you would use the offset 3 * 4 = 12.
+ */
+static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ int idx, x, outsize, r, valuesize;
+ uint32_t values[16];
+
+ if (size & 3 || *pos & 0x3)
+ return -EINVAL;
+
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
+ /* convert offset to sensor number */
+ idx = *pos >> 2;
+
+ valuesize = sizeof(values);
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ if (r) {
+ amdgpu_virt_disable_access_debugfs(adev);
+ return r;
+ }
+
+ if (size > valuesize) {
+ amdgpu_virt_disable_access_debugfs(adev);
+ return -EINVAL;
+ }
+
+ outsize = 0;
+ x = 0;
+ if (!r) {
+ while (size) {
+ r = put_user(values[x++], (int32_t *)buf);
+ buf += 4;
+ size -= 4;
+ outsize += 4;
+ }
+ }
+
+ amdgpu_virt_disable_access_debugfs(adev);
+ return !r ? outsize : r;
+}
+
+/**
+ * amdgpu_debugfs_wave_read - Read WAVE STATUS data
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The offset being sought changes which wave the status data
+ * will be returned for. The bits are used as follows:
+ *
+ * Bits 0..6: Byte offset into data
+ * Bits 7..14: SE selector
+ * Bits 15..22: SH/SA selector
+ * Bits 23..30: CU/{WGP+SIMD} selector
+ * Bits 31..36: WAVE ID selector
+ * Bits 37..44: SIMD ID selector
+ *
+ * The returned data begins with one DWORD of version information,
+ * followed by WAVE STATUS registers relevant to the GFX IP version
+ * being used. See gfx_v8_0_read_wave_data() for an example output.
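+ *
+ * For example (illustrative values): to read the status of wave 3 on
+ * SIMD 2 of CU 5 (SE 0, SA 0), userspace would seek to
+ * (2ULL << 37) | (3ULL << 31) | (5ULL << 23) plus the byte offset into
+ * the returned data.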
+ */
+static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int r, x;
+ ssize_t result = 0;
+ uint32_t offset, se, sh, cu, wave, simd, data[32];
+
+ if (size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = (*pos & GENMASK_ULL(6, 0));
+ se = (*pos & GENMASK_ULL(14, 7)) >> 7;
+ sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
+ cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
+ wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
+ simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0);
+
+ x = 0;
+ if (adev->gfx.funcs->read_wave_data)
+ adev->gfx.funcs->read_wave_data(adev, 0, simd, wave, data, &x);
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ if (!x) {
+ amdgpu_virt_disable_access_debugfs(adev);
+ return -EINVAL;
+ }
+
+ while (size && (offset < x * 4)) {
+ uint32_t value;
+
+ value = data[offset >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ amdgpu_virt_disable_access_debugfs(adev);
+ return r;
+ }
+
+ result += 4;
+ buf += 4;
+ offset += 4;
+ size -= 4;
+ }
+
+ amdgpu_virt_disable_access_debugfs(adev);
+ return result;
+}
+
+/**
+ * amdgpu_debugfs_gpr_read - Read wave gprs
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * The offset being sought changes which wave the GPR data
+ * will be returned for. The bits are used as follows:
+ *
+ * Bits 0..11: Byte offset into data
+ * Bits 12..19: SE selector
+ * Bits 20..27: SH/SA selector
+ * Bits 28..35: CU/{WGP+SIMD} selector
+ * Bits 36..43: WAVE ID selector
+ * Bits 44..51: SIMD ID selector
+ * Bits 52..59: Thread selector
+ * Bits 60..61: Bank selector (VGPR=0,SGPR=1)
+ *
+ * The returned data comes from the SGPR or VGPR register bank for
+ * the selected operational unit.
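+ *
+ * For example (illustrative values): to read SGPRs of wave 1 on SIMD 0
+ * of CU 2 (SE 0, SA 0) starting at the first register, userspace would
+ * seek to (1ULL << 60) | (1ULL << 36) | (2ULL << 28).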
+ */
+static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = f->f_inode->i_private;
+ int r;
+ ssize_t result = 0;
+ uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
+
+ if (size > 4096 || size & 3 || *pos & 3)
+ return -EINVAL;
+
+ /* decode offset */
+ offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
+ se = (*pos & GENMASK_ULL(19, 12)) >> 12;
+ sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
+ cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
+ wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
+ simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
+ thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
+ bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
+
+ data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0)
+ goto err;
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ goto err;
+
+ /* switch to the specific se/sh/cu */
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, se, sh, cu, 0);
+
+ if (bank == 0) {
+ if (adev->gfx.funcs->read_wave_vgprs)
+ adev->gfx.funcs->read_wave_vgprs(adev, 0, simd, wave, thread, offset, size>>2, data);
+ } else {
+ if (adev->gfx.funcs->read_wave_sgprs)
+ adev->gfx.funcs->read_wave_sgprs(adev, 0, simd, wave, offset, size>>2, data);
+ }
+
+ amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ while (size) {
+ uint32_t value;
+
+ value = data[result >> 2];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ amdgpu_virt_disable_access_debugfs(adev);
+ goto err;
+ }
+
+ result += 4;
+ buf += 4;
+ size -= 4;
+ }
+
+ kfree(data);
+ amdgpu_virt_disable_access_debugfs(adev);
+ return result;
+
+err:
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ kfree(data);
+ return r;
+}
+
+/**
+ * amdgpu_debugfs_gfxoff_residency_read - Read GFXOFF residency
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ *
+ * Read the last residency value logged. It doesn't auto-update; one needs to
+ * stop logging before getting the current value.
+ */
+static ssize_t amdgpu_debugfs_gfxoff_residency_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ uint32_t value;
+
+ r = amdgpu_get_gfx_off_residency(adev, &value);
+ if (r)
+ goto out;
+
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ goto out;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+/**
+ * amdgpu_debugfs_gfxoff_residency_write - Log GFXOFF Residency
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * Write a 32-bit non-zero value to start logging and a 32-bit zero to stop.
+ */
+static ssize_t amdgpu_debugfs_gfxoff_residency_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ u32 value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ goto out;
+
+ amdgpu_set_gfx_off_residency(adev, value ? true : false);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
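+
+/*
+ * Taken together, the intended usage of the amdgpu_gfxoff_residency file is
+ * roughly the following (a minimal user-space sketch, error handling
+ * omitted):
+ *
+ *   uint32_t v = 1;
+ *   pwrite(fd, &v, 4, 0);   // start residency logging
+ *   ...                     // run the workload of interest
+ *   v = 0;
+ *   pwrite(fd, &v, 4, 0);   // stop logging
+ *   pread(fd, &v, 4, 0);    // read the last logged residency value
+ */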
+
+
+/**
+ * amdgpu_debugfs_gfxoff_count_read - Read GFXOFF entry count
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ */
+static ssize_t amdgpu_debugfs_gfxoff_count_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ u64 value = 0;
+
+ r = amdgpu_get_gfx_off_entrycount(adev, &value);
+ if (r)
+ goto out;
+
+ r = put_user(value, (u64 *)buf);
+ if (r)
+ goto out;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+/**
+ * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * Write a 32-bit zero to disable or a 32-bit non-zero value to enable.
+ */
+static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ goto out;
+
+ amdgpu_gfx_off_ctrl(adev, value ? true : false);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
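+
+/*
+ * For instance, GFXOFF could be disabled and later re-enabled from user
+ * space with 32-bit writes to the amdgpu_gfxoff file (a minimal sketch):
+ *
+ *   uint32_t v = 0;
+ *   pwrite(fd, &v, 4, 0);   // disable GFXOFF
+ *   ...
+ *   v = 1;
+ *   pwrite(fd, &v, 4, 0);   // allow GFXOFF again
+ */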
+
+
+/**
+ * amdgpu_debugfs_gfxoff_read - Read GFXOFF status
+ *
+ * @f: open file handle
+ * @buf: User buffer to store read data in
+ * @size: Number of bytes to read
+ * @pos: Offset to seek to
+ */
+static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ u32 value = adev->gfx.gfx_off_state;
+
+ r = put_user(value, (u32 *)buf);
+ if (r)
+ goto out;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+static ssize_t amdgpu_debugfs_gfxoff_status_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ while (size) {
+ u32 value;
+
+ r = amdgpu_get_gfx_off_status(adev, &value);
+ if (r)
+ goto out;
+
+ r = put_user(value, (u32 *)buf);
+ if (r)
+ goto out;
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ r = result;
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+static const struct file_operations amdgpu_debugfs_regs2_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = amdgpu_debugfs_regs2_ioctl,
+ .read = amdgpu_debugfs_regs2_read,
+ .write = amdgpu_debugfs_regs2_write,
+ .open = amdgpu_debugfs_regs2_open,
+ .release = amdgpu_debugfs_regs2_release,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_gprwave_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = amdgpu_debugfs_gprwave_ioctl,
+ .read = amdgpu_debugfs_gprwave_read,
+ .open = amdgpu_debugfs_gprwave_open,
+ .release = amdgpu_debugfs_gprwave_release,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_regs_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_read,
+ .write = amdgpu_debugfs_regs_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_didt_read,
+ .write = amdgpu_debugfs_regs_didt_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_pcie_read,
+ .write = amdgpu_debugfs_regs_pcie_write,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_regs_smc_read,
+ .write = amdgpu_debugfs_regs_smc_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_gca_config_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gca_config_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_sensors_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_sensor_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_wave_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_wave_read,
+ .llseek = default_llseek
+};
+static const struct file_operations amdgpu_debugfs_gpr_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gpr_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gfxoff_read,
+ .write = amdgpu_debugfs_gfxoff_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_gfxoff_status_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gfxoff_status_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_gfxoff_count_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gfxoff_count_read,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_debugfs_gfxoff_residency_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_gfxoff_residency_read,
+ .write = amdgpu_debugfs_gfxoff_residency_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations *debugfs_regs[] = {
+ &amdgpu_debugfs_regs_fops,
+ &amdgpu_debugfs_regs2_fops,
+ &amdgpu_debugfs_gprwave_fops,
+ &amdgpu_debugfs_regs_didt_fops,
+ &amdgpu_debugfs_regs_pcie_fops,
+ &amdgpu_debugfs_regs_smc_fops,
+ &amdgpu_debugfs_gca_config_fops,
+ &amdgpu_debugfs_sensors_fops,
+ &amdgpu_debugfs_wave_fops,
+ &amdgpu_debugfs_gpr_fops,
+ &amdgpu_debugfs_gfxoff_fops,
+ &amdgpu_debugfs_gfxoff_status_fops,
+ &amdgpu_debugfs_gfxoff_count_fops,
+ &amdgpu_debugfs_gfxoff_residency_fops,
+};
+
+static const char * const debugfs_regs_names[] = {
+ "amdgpu_regs",
+ "amdgpu_regs2",
+ "amdgpu_gprwave",
+ "amdgpu_regs_didt",
+ "amdgpu_regs_pcie",
+ "amdgpu_regs_smc",
+ "amdgpu_gca_config",
+ "amdgpu_sensors",
+ "amdgpu_wave",
+ "amdgpu_gpr",
+ "amdgpu_gfxoff",
+ "amdgpu_gfxoff_status",
+ "amdgpu_gfxoff_count",
+ "amdgpu_gfxoff_residency",
+};
+
+/**
+ * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide
+ * register access.
+ *
+ * @adev: The device to attach the debugfs entries to
+ */
+int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+{
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *ent, *root = minor->debugfs_root;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
+ ent = debugfs_create_file(debugfs_regs_names[i],
+ S_IFREG | 0400, root,
+ adev, debugfs_regs[i]);
+ if (!i && !IS_ERR_OR_NULL(ent))
+ i_size_write(ent->d_inode, adev->rmmio_size);
+ }
+
+ return 0;
+}
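+
+/*
+ * The entries created above typically appear under the DRM minor's debugfs
+ * directory, e.g. (paths are illustrative and depend on the minor number):
+ *
+ *   /sys/kernel/debug/dri/0/amdgpu_regs
+ *   /sys/kernel/debug/dri/0/amdgpu_gfxoff
+ *   /sys/kernel/debug/dri/0/amdgpu_gfxoff_residency
+ */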
+
+static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = m->private;
+ struct drm_device *dev = adev_to_drm(adev);
+ int r = 0, i;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
+ return r;
+ }
+
+ /* Avoid accidentally unparking the sched thread during GPU reset */
+ r = down_write_killable(&adev->reset_domain->sem);
+ if (r)
+ return r;
+
+ /* hold on the scheduler */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!amdgpu_ring_sched_ready(ring))
+ continue;
+ drm_sched_wqueue_stop(&ring->sched);
+ }
+
+ seq_puts(m, "run ib test:\n");
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ seq_printf(m, "ib ring tests failed (%d).\n", r);
+ else
+ seq_puts(m, "ib ring tests passed.\n");
+
+ /* go on the scheduler */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!amdgpu_ring_sched_ready(ring))
+ continue;
+ drm_sched_wqueue_start(&ring->sched);
+ }
+
+ up_write(&adev->reset_domain->sem);
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return 0;
+}
+
+static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ struct drm_device *dev = adev_to_drm(adev);
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
+ return r;
+ }
+
+ *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return 0;
+}
+
+
+static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ struct drm_device *dev = adev_to_drm(adev);
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
+ return r;
+ }
+
+ *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return 0;
+}
+
+static int amdgpu_debugfs_benchmark(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ struct drm_device *dev = adev_to_drm(adev);
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
+ return r;
+ }
+
+ r = amdgpu_benchmark(adev, val);
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return r;
+}
+
+static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = m->private;
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_file *file;
+ int r;
+
+ r = mutex_lock_interruptible(&dev->filelist_mutex);
+ if (r)
+ return r;
+
+ list_for_each_entry(file, &dev->filelist, lhead) {
+ struct amdgpu_fpriv *fpriv = file->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_task_info *ti;
+
+ ti = amdgpu_vm_get_task_info_vm(vm);
+ if (ti) {
+ seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->task.pid, ti->process_name);
+ amdgpu_vm_put_task_info(ti);
+ }
+
+ r = amdgpu_bo_reserve(vm->root.bo, true);
+ if (r)
+ break;
+ amdgpu_debugfs_vm_bo_info(vm, m);
+ amdgpu_bo_unreserve(vm->root.bo);
+ }
+
+ mutex_unlock(&dev->filelist_mutex);
+
+ return r;
+}
+
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_test_ib);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_vm_info);
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_vram_fops, amdgpu_debugfs_evict_vram,
+ NULL, "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_gtt_fops, amdgpu_debugfs_evict_gtt,
+ NULL, "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_benchmark_fops, NULL, amdgpu_debugfs_benchmark,
+ "%lld\n");
+
+static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
+ struct dma_fence **fences)
+{
+ struct amdgpu_fence_driver *drv = &ring->fence_drv;
+ uint32_t sync_seq, last_seq;
+
+ last_seq = atomic_read(&ring->fence_drv.last_seq);
+ sync_seq = ring->fence_drv.sync_seq;
+
+ last_seq &= drv->num_fences_mask;
+ sync_seq &= drv->num_fences_mask;
+
+ do {
+ struct dma_fence *fence, **ptr;
+
+ ++last_seq;
+ last_seq &= drv->num_fences_mask;
+ ptr = &drv->fences[last_seq];
+
+ fence = rcu_dereference_protected(*ptr, 1);
+ RCU_INIT_POINTER(*ptr, NULL);
+
+ if (!fence)
+ continue;
+
+ fences[last_seq] = fence;
+
+ } while (last_seq != sync_seq);
+}
+
+static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
+ int length)
+{
+ int i;
+ struct dma_fence *fence;
+
+ for (i = 0; i < length; i++) {
+ fence = fences[i];
+ if (!fence)
+ continue;
+ dma_fence_signal(fence);
+ dma_fence_put(fence);
+ }
+}
+
+static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *s_job;
+ struct dma_fence *fence;
+
+ spin_lock(&sched->job_list_lock);
+ list_for_each_entry(s_job, &sched->pending_list, list) {
+ fence = sched->ops->run_job(s_job);
+ dma_fence_put(fence);
+ }
+ spin_unlock(&sched->job_list_lock);
+}
+
+static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
+{
+ struct amdgpu_job *job;
+ struct drm_sched_job *s_job, *tmp;
+ uint32_t preempt_seq;
+ struct dma_fence *fence, **ptr;
+ struct amdgpu_fence_driver *drv = &ring->fence_drv;
+ struct drm_gpu_scheduler *sched = &ring->sched;
+ bool preempted = true;
+
+ if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
+ return;
+
+ preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
+ if (preempt_seq <= atomic_read(&drv->last_seq)) {
+ preempted = false;
+ goto no_preempt;
+ }
+
+ preempt_seq &= drv->num_fences_mask;
+ ptr = &drv->fences[preempt_seq];
+ fence = rcu_dereference_protected(*ptr, 1);
+
+no_preempt:
+ spin_lock(&sched->job_list_lock);
+ list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
+ if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
+ /* remove job from pending_list */
+ list_del_init(&s_job->list);
+ sched->ops->free_job(s_job);
+ continue;
+ }
+ job = to_amdgpu_job(s_job);
+ if (preempted && (&job->hw_fence.base) == fence)
+ /* mark the job as preempted */
+ job->preemption_status |= AMDGPU_IB_PREEMPTED;
+ }
+ spin_unlock(&sched->job_list_lock);
+}
+
+static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
+{
+ int r, length;
+ struct amdgpu_ring *ring;
+ struct dma_fence **fences = NULL;
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+
+ if (val >= AMDGPU_MAX_RINGS)
+ return -EINVAL;
+
+ ring = adev->rings[val];
+
+ if (!amdgpu_ring_sched_ready(ring) ||
+ !ring->funcs->preempt_ib)
+ return -EINVAL;
+
+ /* the last preemption failed */
+ if (ring->trail_seq != le32_to_cpu(*ring->trail_fence_cpu_addr))
+ return -EBUSY;
+
+ length = ring->fence_drv.num_fences_mask + 1;
+ fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
+ if (!fences)
+ return -ENOMEM;
+
+ /* Avoid accidentally unparking the sched thread during GPU reset */
+ r = down_read_killable(&adev->reset_domain->sem);
+ if (r)
+ goto pro_end;
+
+ /* stop the scheduler */
+ drm_sched_wqueue_stop(&ring->sched);
+
+ /* preempt the IB */
+ r = amdgpu_ring_preempt_ib(ring);
+ if (r) {
+ DRM_WARN("failed to preempt ring %d\n", ring->idx);
+ goto failure;
+ }
+
+ amdgpu_fence_process(ring);
+
+ if (atomic_read(&ring->fence_drv.last_seq) !=
+ ring->fence_drv.sync_seq) {
+ DRM_INFO("ring %d was preempted\n", ring->idx);
+
+ amdgpu_ib_preempt_mark_partial_job(ring);
+
+ /* swap out the old fences */
+ amdgpu_ib_preempt_fences_swap(ring, fences);
+
+ amdgpu_fence_driver_force_completion(ring);
+
+ /* resubmit unfinished jobs */
+ amdgpu_ib_preempt_job_recovery(&ring->sched);
+
+ /* wait for jobs finished */
+ amdgpu_fence_wait_empty(ring);
+
+ /* signal the old fences */
+ amdgpu_ib_preempt_signal_fences(fences, length);
+ }
+
+failure:
+ /* restart the scheduler */
+ drm_sched_wqueue_start(&ring->sched);
+
+ up_read(&adev->reset_domain->sem);
+
+pro_end:
+ kfree(fences);
+
+ return r;
+}
+
+static int amdgpu_debugfs_sclk_set(void *data, u64 val)
+{
+ int ret = 0;
+ uint32_t max_freq, min_freq;
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+
+ if (amdgpu_sriov_multi_vf_mode(adev))
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return ret;
+ }
+
+ ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_freq, &max_freq);
+ if (ret == -EOPNOTSUPP) {
+ ret = 0;
+ goto out;
+ }
+ if (ret || val > max_freq || val < min_freq) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, (uint32_t)val, (uint32_t)val);
+ if (ret)
+ ret = -EINVAL;
+
+out:
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return ret;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
+ amdgpu_debugfs_ib_preempt, "%llu\n");
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
+ amdgpu_debugfs_sclk_set, "%llu\n");
+
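+/*
+ * Both attributes take a decimal value written to the corresponding debugfs
+ * file; for example (a hypothetical sketch, paths depend on the DRM minor):
+ *
+ *   echo 0 > /sys/kernel/debug/dri/0/amdgpu_preempt_ib     # preempt ring 0
+ *   echo 500 > /sys/kernel/debug/dri/0/amdgpu_force_sclk   # force sclk to 500
+ *                                                          # (if in DPM range)
+ */
+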
+int amdgpu_debugfs_init(struct amdgpu_device *adev)
+{
+ struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
+ struct dentry *ent;
+ int r, i;
+
+ if (!debugfs_initialized())
+ return 0;
+
+ debugfs_create_x32("amdgpu_smu_debug", 0600, root,
+ &adev->pm.smu_debug_mask);
+
+ ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
+ &fops_ib_preempt);
+ if (IS_ERR(ent)) {
+ DRM_ERROR("unable to create amdgpu_preempt_ib debugfs file\n");
+ return PTR_ERR(ent);
+ }
+
+ ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
+ &fops_sclk_set);
+ if (IS_ERR(ent)) {
+ DRM_ERROR("unable to create amdgpu_force_sclk debugfs file\n");
+ return PTR_ERR(ent);
+ }
+
+ /* Register debugfs entries for amdgpu_ttm */
+ amdgpu_ttm_debugfs_init(adev);
+ amdgpu_debugfs_pm_init(adev);
+ amdgpu_debugfs_sa_init(adev);
+ amdgpu_debugfs_fence_init(adev);
+ amdgpu_debugfs_gem_init(adev);
+
+ r = amdgpu_debugfs_regs_init(adev);
+ if (r)
+ DRM_ERROR("registering register debugfs failed (%d).\n", r);
+
+ amdgpu_debugfs_firmware_init(adev);
+ amdgpu_ta_if_debugfs_init(adev);
+
+ amdgpu_debugfs_mes_event_log_init(adev);
+
+#if defined(CONFIG_DRM_AMD_DC)
+ if (adev->dc_enabled)
+ dtn_debugfs_init(adev);
+#endif
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring)
+ continue;
+
+ amdgpu_debugfs_ring_init(adev, ring);
+ }
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (!amdgpu_vcnfw_log)
+ break;
+
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ amdgpu_debugfs_vcn_fwlog_init(adev, i, &adev->vcn.inst[i]);
+ }
+
+ if (amdgpu_umsch_mm & amdgpu_umsch_mm_fwlog)
+ amdgpu_debugfs_umsch_fwlog_init(adev, &adev->umsch_mm);
+
+ amdgpu_debugfs_vcn_sched_mask_init(adev);
+ amdgpu_debugfs_jpeg_sched_mask_init(adev);
+ amdgpu_debugfs_gfx_sched_mask_init(adev);
+ amdgpu_debugfs_compute_sched_mask_init(adev);
+ amdgpu_debugfs_sdma_sched_mask_init(adev);
+
+ amdgpu_ras_debugfs_create_all(adev);
+ amdgpu_rap_debugfs_init(adev);
+ amdgpu_securedisplay_debugfs_init(adev);
+ amdgpu_fw_attestation_debugfs_init(adev);
+ amdgpu_psp_debugfs_init(adev);
+
+ debugfs_create_file("amdgpu_evict_vram", 0400, root, adev,
+ &amdgpu_evict_vram_fops);
+ debugfs_create_file("amdgpu_evict_gtt", 0400, root, adev,
+ &amdgpu_evict_gtt_fops);
+ debugfs_create_file("amdgpu_test_ib", 0400, root, adev,
+ &amdgpu_debugfs_test_ib_fops);
+ debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
+ &amdgpu_debugfs_vm_info_fops);
+ debugfs_create_file("amdgpu_benchmark", 0200, root, adev,
+ &amdgpu_benchmark_fops);
+
+ adev->debugfs_vbios_blob.data = adev->bios;
+ adev->debugfs_vbios_blob.size = adev->bios_size;
+ debugfs_create_blob("amdgpu_vbios", 0444, root,
+ &adev->debugfs_vbios_blob);
+
+ adev->debugfs_discovery_blob.data = adev->mman.discovery_bin;
+ adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size;
+ debugfs_create_blob("amdgpu_discovery", 0444, root,
+ &adev->debugfs_discovery_blob);
+
+ return 0;
+}
+
+static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
+{
+ struct drm_file *file;
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_bo *root_bo;
+ struct amdgpu_device *adev;
+ int r;
+
+ file = m->private;
+ if (!file)
+ return -EINVAL;
+
+ adev = drm_to_adev(file->minor->dev);
+ fpriv = file->driver_priv;
+ if (!fpriv || !fpriv->vm.root.bo)
+ return -ENODEV;
+
+ root_bo = amdgpu_bo_ref(fpriv->vm.root.bo);
+ r = amdgpu_bo_reserve(root_bo, true);
+ if (r) {
+ amdgpu_bo_unref(&root_bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "pd_address: 0x%llx\n", amdgpu_gmc_pd_addr(fpriv->vm.root.bo));
+ seq_printf(m, "max_pfn: 0x%llx\n", adev->vm_manager.max_pfn);
+ seq_printf(m, "num_level: 0x%x\n", adev->vm_manager.num_level);
+ seq_printf(m, "block_size: 0x%x\n", adev->vm_manager.block_size);
+ seq_printf(m, "fragment_size: 0x%x\n", adev->vm_manager.fragment_size);
+
+ amdgpu_bo_unreserve(root_bo);
+ amdgpu_bo_unref(&root_bo);
+
+ return 0;
+}
+
+static int amdgpu_pt_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_pt_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_pt_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_pt_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+ debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file,
+ &amdgpu_pt_info_fops);
+}
+
+#else
+int amdgpu_debugfs_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
+int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+}
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index c19c4d138751..e7b3c38e5186 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -1,5 +1,7 @@
/*
- * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,22 +22,16 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+/*
+ * Debugfs
+ */
-#ifndef __AMDGPU_PM_H__
-#define __AMDGPU_PM_H__
-
-struct cg_flag_name
-{
- u32 flag;
- const char *name;
-};
-
-int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
-void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
-void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
-void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
-void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
-void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
-void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
+int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_fini(struct amdgpu_device *adev);
+void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_vm_init(struct drm_file *file);
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
new file mode 100644
index 000000000000..8a026bc9ea44
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <generated/utsrelease.h>
+#include <linux/devcoredump.h>
+#include "amdgpu_dev_coredump.h"
+#include "atom.h"
+
+#ifndef CONFIG_DEV_COREDUMP
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ bool vram_lost, struct amdgpu_job *job)
+{
+}
+#else
+
+const char *hw_ip_names[MAX_HWIP] = {
+ [GC_HWIP] = "GC",
+ [HDP_HWIP] = "HDP",
+ [SDMA0_HWIP] = "SDMA0",
+ [SDMA1_HWIP] = "SDMA1",
+ [SDMA2_HWIP] = "SDMA2",
+ [SDMA3_HWIP] = "SDMA3",
+ [SDMA4_HWIP] = "SDMA4",
+ [SDMA5_HWIP] = "SDMA5",
+ [SDMA6_HWIP] = "SDMA6",
+ [SDMA7_HWIP] = "SDMA7",
+ [LSDMA_HWIP] = "LSDMA",
+ [MMHUB_HWIP] = "MMHUB",
+ [ATHUB_HWIP] = "ATHUB",
+ [NBIO_HWIP] = "NBIO",
+ [MP0_HWIP] = "MP0",
+ [MP1_HWIP] = "MP1",
+ [UVD_HWIP] = "UVD/JPEG/VCN",
+ [VCN1_HWIP] = "VCN1",
+ [VCE_HWIP] = "VCE",
+ [VPE_HWIP] = "VPE",
+ [DF_HWIP] = "DF",
+ [DCE_HWIP] = "DCE",
+ [OSSSYS_HWIP] = "OSSSYS",
+ [SMUIO_HWIP] = "SMUIO",
+ [PWR_HWIP] = "PWR",
+ [NBIF_HWIP] = "NBIF",
+ [THM_HWIP] = "THM",
+ [CLK_HWIP] = "CLK",
+ [UMC_HWIP] = "UMC",
+ [RSMU_HWIP] = "RSMU",
+ [XGMI_HWIP] = "XGMI",
+ [DCI_HWIP] = "DCI",
+ [PCIE_HWIP] = "PCIE",
+};
+
+static void amdgpu_devcoredump_fw_info(struct amdgpu_device *adev,
+ struct drm_printer *p)
+{
+ uint32_t version;
+ uint32_t feature;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ drm_printf(p, "VCE feature version: %u, fw version: 0x%08x\n",
+ adev->vce.fb_version, adev->vce.fw_version);
+ drm_printf(p, "UVD feature version: %u, fw version: 0x%08x\n", 0,
+ adev->uvd.fw_version);
+ drm_printf(p, "GMC feature version: %u, fw version: 0x%08x\n", 0,
+ adev->gmc.fw_version);
+ drm_printf(p, "ME feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.me_feature_version, adev->gfx.me_fw_version);
+ drm_printf(p, "PFP feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.pfp_feature_version, adev->gfx.pfp_fw_version);
+ drm_printf(p, "CE feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.ce_feature_version, adev->gfx.ce_fw_version);
+ drm_printf(p, "RLC feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlc_feature_version, adev->gfx.rlc_fw_version);
+
+ drm_printf(p, "RLC SRLC feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlc_srlc_feature_version,
+ adev->gfx.rlc_srlc_fw_version);
+ drm_printf(p, "RLC SRLG feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlc_srlg_feature_version,
+ adev->gfx.rlc_srlg_fw_version);
+ drm_printf(p, "RLC SRLS feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlc_srls_feature_version,
+ adev->gfx.rlc_srls_fw_version);
+ drm_printf(p, "RLCP feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlcp_ucode_feature_version,
+ adev->gfx.rlcp_ucode_version);
+ drm_printf(p, "RLCV feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.rlcv_ucode_feature_version,
+ adev->gfx.rlcv_ucode_version);
+ drm_printf(p, "MEC feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.mec_feature_version, adev->gfx.mec_fw_version);
+
+ if (adev->gfx.mec2_fw)
+ drm_printf(p, "MEC2 feature version: %u, fw version: 0x%08x\n",
+ adev->gfx.mec2_feature_version,
+ adev->gfx.mec2_fw_version);
+
+ drm_printf(p, "IMU feature version: %u, fw version: 0x%08x\n", 0,
+ adev->gfx.imu_fw_version);
+ drm_printf(p, "PSP SOS feature version: %u, fw version: 0x%08x\n",
+ adev->psp.sos.feature_version, adev->psp.sos.fw_version);
+ drm_printf(p, "PSP ASD feature version: %u, fw version: 0x%08x\n",
+ adev->psp.asd_context.bin_desc.feature_version,
+ adev->psp.asd_context.bin_desc.fw_version);
+
+ drm_printf(p, "TA XGMI feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.xgmi_context.context.bin_desc.feature_version,
+ adev->psp.xgmi_context.context.bin_desc.fw_version);
+ drm_printf(p, "TA RAS feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.ras_context.context.bin_desc.feature_version,
+ adev->psp.ras_context.context.bin_desc.fw_version);
+ drm_printf(p, "TA HDCP feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.hdcp_context.context.bin_desc.feature_version,
+ adev->psp.hdcp_context.context.bin_desc.fw_version);
+ drm_printf(p, "TA DTM feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.dtm_context.context.bin_desc.feature_version,
+ adev->psp.dtm_context.context.bin_desc.fw_version);
+ drm_printf(p, "TA RAP feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.rap_context.context.bin_desc.feature_version,
+ adev->psp.rap_context.context.bin_desc.fw_version);
+ drm_printf(p,
+ "TA SECURE DISPLAY feature version: 0x%08x, fw version: 0x%08x\n",
+ adev->psp.securedisplay_context.context.bin_desc.feature_version,
+ adev->psp.securedisplay_context.context.bin_desc.fw_version);
+
+ /* SMC firmware */
+ version = adev->pm.fw_version;
+
+ smu_program = (version >> 24) & 0xff;
+ smu_major = (version >> 16) & 0xff;
+ smu_minor = (version >> 8) & 0xff;
+ smu_debug = (version >> 0) & 0xff;
+ drm_printf(p,
+ "SMC feature version: %u, program: %d, fw version: 0x%08x (%d.%d.%d)\n",
+ 0, smu_program, version, smu_major, smu_minor, smu_debug);
+
+ /* SDMA firmware */
+ for (int i = 0; i < adev->sdma.num_instances; i++) {
+ drm_printf(p,
+ "SDMA%d feature version: %u, firmware version: 0x%08x\n",
+ i, adev->sdma.instance[i].feature_version,
+ adev->sdma.instance[i].fw_version);
+ }
+
+ drm_printf(p, "VCN feature version: %u, fw version: 0x%08x\n", 0,
+ adev->vcn.fw_version);
+ drm_printf(p, "DMCU feature version: %u, fw version: 0x%08x\n", 0,
+ adev->dm.dmcu_fw_version);
+ drm_printf(p, "DMCUB feature version: %u, fw version: 0x%08x\n", 0,
+ adev->dm.dmcub_fw_version);
+ drm_printf(p, "PSP TOC feature version: %u, fw version: 0x%08x\n",
+ adev->psp.toc.feature_version, adev->psp.toc.fw_version);
+
+ version = adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK;
+ feature = (adev->mes.kiq_version & AMDGPU_MES_FEAT_VERSION_MASK) >>
+ AMDGPU_MES_FEAT_VERSION_SHIFT;
+ drm_printf(p, "MES_KIQ feature version: %u, fw version: 0x%08x\n",
+ feature, version);
+
+ version = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
+ feature = (adev->mes.sched_version & AMDGPU_MES_FEAT_VERSION_MASK) >>
+ AMDGPU_MES_FEAT_VERSION_SHIFT;
+ drm_printf(p, "MES feature version: %u, fw version: 0x%08x\n", feature,
+ version);
+
+ drm_printf(p, "VPE feature version: %u, fw version: 0x%08x\n",
+ adev->vpe.feature_version, adev->vpe.fw_version);
+
+ drm_printf(p, "\nVBIOS Information\n");
+ drm_printf(p, "vbios name : %s\n", ctx->name);
+ drm_printf(p, "vbios pn : %s\n", ctx->vbios_pn);
+ drm_printf(p, "vbios version : %d\n", ctx->version);
+ drm_printf(p, "vbios ver_str : %s\n", ctx->vbios_ver_str);
+ drm_printf(p, "vbios date : %s\n", ctx->date);
+}
+
+static ssize_t
+amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
+ void *data, size_t datalen)
+{
+ struct drm_printer p;
+ struct amdgpu_coredump_info *coredump = data;
+ struct drm_print_iterator iter;
+ struct amdgpu_vm_fault_info *fault_info;
+ struct amdgpu_ip_block *ip_block;
+ int ver;
+
+ iter.data = buffer;
+ iter.offset = 0;
+ iter.start = offset;
+ iter.remain = count;
+
+ p = drm_coredump_printer(&iter);
+
+ drm_printf(&p, "**** AMDGPU Device Coredump ****\n");
+ drm_printf(&p, "version: " AMDGPU_COREDUMP_VERSION "\n");
+ drm_printf(&p, "kernel: " UTS_RELEASE "\n");
+ drm_printf(&p, "module: " KBUILD_MODNAME "\n");
+ drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
+ coredump->reset_time.tv_nsec);
+
+ if (coredump->reset_task_info.task.pid)
+ drm_printf(&p, "process_name: %s PID: %d\n",
+ coredump->reset_task_info.process_name,
+ coredump->reset_task_info.task.pid);
+
+ /* SOC Information */
+ drm_printf(&p, "\nSOC Information\n");
+ drm_printf(&p, "SOC Device id: %d\n", coredump->adev->pdev->device);
+ drm_printf(&p, "SOC PCI Revision id: %d\n", coredump->adev->pdev->revision);
+ drm_printf(&p, "SOC Family: %d\n", coredump->adev->family);
+ drm_printf(&p, "SOC Revision id: %d\n", coredump->adev->rev_id);
+ drm_printf(&p, "SOC External Revision id: %d\n", coredump->adev->external_rev_id);
+
+ /* Memory Information */
+ drm_printf(&p, "\nSOC Memory Information\n");
+ drm_printf(&p, "real vram size: %llu\n", coredump->adev->gmc.real_vram_size);
+ drm_printf(&p, "visible vram size: %llu\n", coredump->adev->gmc.visible_vram_size);
+ drm_printf(&p, "gtt size: %llu\n", coredump->adev->mman.gtt_mgr.manager.size);
+
+ /* GDS Config */
+ drm_printf(&p, "\nGDS Config\n");
+ drm_printf(&p, "gds: total size: %d\n", coredump->adev->gds.gds_size);
+ drm_printf(&p, "gds: compute partition size: %d\n", coredump->adev->gds.gds_size);
+ drm_printf(&p, "gds: gws per compute partition: %d\n", coredump->adev->gds.gws_size);
+ drm_printf(&p, "gds: oa per compute partition: %d\n", coredump->adev->gds.oa_size);
+
+ /* HWIP Version Information */
+ drm_printf(&p, "\nHW IP Version Information\n");
+ for (int i = 1; i < MAX_HWIP; i++) {
+ for (int j = 0; j < HWIP_MAX_INSTANCE; j++) {
+ ver = coredump->adev->ip_versions[i][j];
+ if (ver)
+ drm_printf(&p, "HWIP: %s[%d][%d]: v%d.%d.%d.%d.%d\n",
+ hw_ip_names[i], i, j,
+ IP_VERSION_MAJ(ver),
+ IP_VERSION_MIN(ver),
+ IP_VERSION_REV(ver),
+ IP_VERSION_VARIANT(ver),
+ IP_VERSION_SUBREV(ver));
+ }
+ }
+
+ /* IP firmware information */
+ drm_printf(&p, "\nIP Firmwares\n");
+ amdgpu_devcoredump_fw_info(coredump->adev, &p);
+
+ if (coredump->ring) {
+ drm_printf(&p, "\nRing timed out details\n");
+ drm_printf(&p, "IP Type: %d Ring Name: %s\n",
+ coredump->ring->funcs->type,
+ coredump->ring->name);
+ }
+
+ /* Add page fault information */
+ fault_info = &coredump->adev->vm_manager.fault_info;
+ drm_printf(&p, "\n[%s] Page fault observed\n",
+ fault_info->vmhub ? "mmhub" : "gfxhub");
+ drm_printf(&p, "Faulty page starting at address: 0x%016llx\n", fault_info->addr);
+ drm_printf(&p, "Protection fault status register: 0x%x\n\n", fault_info->status);
+
+ /* dump the ip state for each ip */
+ drm_printf(&p, "IP Dump\n");
+ for (int i = 0; i < coredump->adev->num_ip_blocks; i++) {
+ ip_block = &coredump->adev->ip_blocks[i];
+ if (ip_block->version->funcs->print_ip_state) {
+ drm_printf(&p, "IP: %s\n", ip_block->version->funcs->name);
+ ip_block->version->funcs->print_ip_state(ip_block, &p);
+ drm_printf(&p, "\n");
+ }
+ }
+
+ /* Add ring buffer information */
+ drm_printf(&p, "Ring buffer information\n");
+ for (int i = 0; i < coredump->adev->num_rings; i++) {
+ int j = 0;
+ struct amdgpu_ring *ring = coredump->adev->rings[i];
+
+ drm_printf(&p, "ring name: %s\n", ring->name);
+ drm_printf(&p, "Rptr: 0x%llx Wptr: 0x%llx RB mask: %x\n",
+ amdgpu_ring_get_rptr(ring),
+ amdgpu_ring_get_wptr(ring),
+ ring->buf_mask);
+ drm_printf(&p, "Ring size in dwords: %d\n",
+ ring->ring_size / 4);
+ drm_printf(&p, "Ring contents\n");
+ drm_printf(&p, "Offset \t Value\n");
+
+ while (j < ring->ring_size) {
+ drm_printf(&p, "0x%x \t 0x%x\n", j, ring->ring[j / 4]);
+ j += 4;
+ }
+ }
+
+ if (coredump->skip_vram_check)
+ drm_printf(&p, "VRAM lost check is skipped!\n");
+ else if (coredump->reset_vram_lost)
+ drm_printf(&p, "VRAM is lost due to GPU reset!\n");
+
+ return count - iter.remain;
+}
+
+static void amdgpu_devcoredump_free(void *data)
+{
+ kfree(data);
+}
+
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ bool vram_lost, struct amdgpu_job *job)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct amdgpu_coredump_info *coredump;
+ struct drm_sched_job *s_job;
+
+ coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);
+
+ if (!coredump) {
+ DRM_ERROR("%s: failed to allocate memory for coredump\n", __func__);
+ return;
+ }
+
+ coredump->skip_vram_check = skip_vram_check;
+ coredump->reset_vram_lost = vram_lost;
+
+ if (job && job->pasid) {
+ struct amdgpu_task_info *ti;
+
+ ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
+ if (ti) {
+ coredump->reset_task_info = *ti;
+ amdgpu_vm_put_task_info(ti);
+ }
+ }
+
+ if (job) {
+ s_job = &job->base;
+ coredump->ring = to_amdgpu_ring(s_job->sched);
+ }
+
+ coredump->adev = adev;
+
+ ktime_get_ts64(&coredump->reset_time);
+
+ dev_coredumpm(dev->dev, THIS_MODULE, coredump, 0, GFP_NOWAIT,
+ amdgpu_devcoredump_read, amdgpu_devcoredump_free);
+
+ drm_info(dev, "AMDGPU device coredump file has been created\n");
+ drm_info(dev, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
+ dev->primary->index);
+}
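+
+/*
+ * The resulting dump can then be retrieved from user space, e.g. (path is
+ * illustrative, the card index matches the message printed above):
+ *
+ *   cat /sys/class/drm/card0/device/devcoredump/data > amdgpu_coredump.txt
+ */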
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
new file mode 100644
index 000000000000..ef9772c6bcc9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DEV_COREDUMP_H__
+#define __AMDGPU_DEV_COREDUMP_H__
+
+#include "amdgpu.h"
+
+#ifdef CONFIG_DEV_COREDUMP
+
+#define AMDGPU_COREDUMP_VERSION "1"
+
+struct amdgpu_coredump_info {
+ struct amdgpu_device *adev;
+ struct amdgpu_task_info reset_task_info;
+ struct timespec64 reset_time;
+ bool skip_vram_check;
+ bool reset_vram_lost;
+ struct amdgpu_ring *ring;
+};
+#endif
+
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ bool vram_lost, struct amdgpu_job *job);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 43ca16b6eee2..7a899fb4de29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,13 +25,24 @@
* Alex Deucher
* Jerome Glisse
*/
+
+#include <linux/aperture.h>
+#include <linux/power_supply.h>
#include <linux/kthread.h>
+#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>
-#include <linux/debugfs.h>
-#include <drm/drmP.h>
+#include <linux/iommu.h>
+#include <linux/pci.h>
+#include <linux/pci-p2pdma.h>
+#include <linux/apple-gmux.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_client_event.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
+#include <linux/device.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
@@ -50,14 +61,55 @@
#endif
#include "vi.h"
#include "soc15.h"
+#include "nv.h"
#include "bif/bif_4_1_d.h"
-#include <linux/pci.h>
#include <linux/firmware.h>
+#include "amdgpu_vf_error.h"
+
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_pm.h"
+
+#include "amdgpu_xgmi.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_pmu.h"
+#include "amdgpu_fru_eeprom.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_virt.h"
+#include "amdgpu_dev_coredump.h"
+
+#include <linux/suspend.h>
+#include <drm/task_barrier.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_drv.h>
+
+#if IS_ENABLED(CONFIG_X86)
+#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
+#endif
+
+MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
+MODULE_FIRMWARE("amdgpu/cyan_skillfish_gpu_info.bin");
-static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
-static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
+#define AMDGPU_RESUME_MS 2000
+#define AMDGPU_MAX_RETRY_LIMIT 2
+#define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
+#define AMDGPU_PCIE_INDEX_FALLBACK (0x38 >> 2)
+#define AMDGPU_PCIE_INDEX_HI_FALLBACK (0x44 >> 2)
+#define AMDGPU_PCIE_DATA_FALLBACK (0x3C >> 2)
-static const char *amdgpu_asic_name[] = {
+#define AMDGPU_VBIOS_SKIP (1U << 0)
+#define AMDGPU_VBIOS_OPTIONAL (1U << 1)
+
+static const struct drm_driver amdgpu_kms_driver;
+
+const char *amdgpu_asic_name[] = {
"TAHITI",
"PITCAIRN",
"VERDE",
@@ -76,169 +128,1153 @@ static const char *amdgpu_asic_name[] = {
"POLARIS10",
"POLARIS11",
"POLARIS12",
+ "VEGAM",
"VEGA10",
+ "VEGA12",
+ "VEGA20",
+ "RAVEN",
+ "ARCTURUS",
+ "RENOIR",
+ "ALDEBARAN",
+ "NAVI10",
+ "CYAN_SKILLFISH",
+ "NAVI14",
+ "NAVI12",
+ "SIENNA_CICHLID",
+ "NAVY_FLOUNDER",
+ "VANGOGH",
+ "DIMGREY_CAVEFISH",
+ "BEIGE_GOBY",
+ "YELLOW_CARP",
+ "IP DISCOVERY",
"LAST",
};
-bool amdgpu_device_is_px(struct drm_device *dev)
+#define AMDGPU_IP_BLK_MASK_ALL GENMASK(AMD_IP_BLOCK_TYPE_NUM - 1, 0)
+/*
+ * Default init level where all blocks are expected to be initialized. This is
+ * the level of initialization expected by default and also after a full reset
+ * of the device.
+ */
+struct amdgpu_init_level amdgpu_init_default = {
+ .level = AMDGPU_INIT_LEVEL_DEFAULT,
+ .hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
+};
+
+struct amdgpu_init_level amdgpu_init_recovery = {
+ .level = AMDGPU_INIT_LEVEL_RESET_RECOVERY,
+ .hwini_ip_block_mask = AMDGPU_IP_BLK_MASK_ALL,
+};
+
+/*
+ * Minimal blocks needed to be initialized before an XGMI hive can be reset. This
+ * is used for cases like reset on initialization where the entire hive needs to
+ * be reset before first use.
+ */
+struct amdgpu_init_level amdgpu_init_minimal_xgmi = {
+ .level = AMDGPU_INIT_LEVEL_MINIMAL_XGMI,
+ .hwini_ip_block_mask =
+ BIT(AMD_IP_BLOCK_TYPE_GMC) | BIT(AMD_IP_BLOCK_TYPE_SMC) |
+ BIT(AMD_IP_BLOCK_TYPE_COMMON) | BIT(AMD_IP_BLOCK_TYPE_IH) |
+ BIT(AMD_IP_BLOCK_TYPE_PSP)
+};
+
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev);
+
+static inline bool amdgpu_ip_member_of_hwini(struct amdgpu_device *adev,
+ enum amd_ip_block_type block)
+{
+ return (adev->init_lvl->hwini_ip_block_mask & (1U << block)) != 0;
+}
+
+void amdgpu_set_init_level(struct amdgpu_device *adev,
+ enum amdgpu_init_lvl_id lvl)
+{
+ switch (lvl) {
+ case AMDGPU_INIT_LEVEL_MINIMAL_XGMI:
+ adev->init_lvl = &amdgpu_init_minimal_xgmi;
+ break;
+ case AMDGPU_INIT_LEVEL_RESET_RECOVERY:
+ adev->init_lvl = &amdgpu_init_recovery;
+ break;
+ case AMDGPU_INIT_LEVEL_DEFAULT:
+ fallthrough;
+ default:
+ adev->init_lvl = &amdgpu_init_default;
+ break;
+ }
+}
+
+static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev);
+static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
+ void *data);
+
+/**
+ * DOC: pcie_replay_count
+ *
+ * The amdgpu driver provides a sysfs API for reporting the total number
+ * of PCIe replays (NAKs).
+ * The file pcie_replay_count is used for this and returns the total
+ * number of replays as a sum of the NAKs generated and NAKs received.
+ */
+
+static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);
+
+ return sysfs_emit(buf, "%llu\n", cnt);
+}
+
+static DEVICE_ATTR(pcie_replay_count, 0444,
+ amdgpu_device_get_pcie_replay_count, NULL);
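+
+/*
+ * The attribute is read-only; it can be queried from user space, e.g.
+ * (path is illustrative and depends on the card index):
+ *
+ *   cat /sys/class/drm/card0/device/pcie_replay_count
+ */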
+
+static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ if (amdgpu_nbio_is_replay_cnt_supported(adev))
+ ret = sysfs_create_file(&adev->dev->kobj,
+ &dev_attr_pcie_replay_count.attr);
+
+ return ret;
+}
+
+static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (amdgpu_nbio_is_replay_cnt_supported(adev))
+ sysfs_remove_file(&adev->dev->kobj,
+ &dev_attr_pcie_replay_count.attr);
+}
+
+static ssize_t amdgpu_sysfs_reg_state_get(struct file *f, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buf,
+ loff_t ppos, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ ssize_t bytes_read;
+
+ switch (ppos) {
+ case AMDGPU_SYS_REG_STATE_XGMI:
+ bytes_read = amdgpu_asic_get_reg_state(
+ adev, AMDGPU_REG_STATE_TYPE_XGMI, buf, count);
+ break;
+ case AMDGPU_SYS_REG_STATE_WAFL:
+ bytes_read = amdgpu_asic_get_reg_state(
+ adev, AMDGPU_REG_STATE_TYPE_WAFL, buf, count);
+ break;
+ case AMDGPU_SYS_REG_STATE_PCIE:
+ bytes_read = amdgpu_asic_get_reg_state(
+ adev, AMDGPU_REG_STATE_TYPE_PCIE, buf, count);
+ break;
+ case AMDGPU_SYS_REG_STATE_USR:
+ bytes_read = amdgpu_asic_get_reg_state(
+ adev, AMDGPU_REG_STATE_TYPE_USR, buf, count);
+ break;
+ case AMDGPU_SYS_REG_STATE_USR_1:
+ bytes_read = amdgpu_asic_get_reg_state(
+ adev, AMDGPU_REG_STATE_TYPE_USR_1, buf, count);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return bytes_read;
+}
+
+static const BIN_ATTR(reg_state, 0444, amdgpu_sysfs_reg_state_get, NULL,
+ AMDGPU_SYS_REG_STATE_END);
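+
+/*
+ * Note that the file offset passed to read() selects which register-state
+ * bank is dumped; a user-space reader might do, for example (a minimal
+ * sketch, with the AMDGPU_SYS_REG_STATE_* offsets matching the switch
+ * above):
+ *
+ *   pread(fd, buf, sizeof(buf), AMDGPU_SYS_REG_STATE_XGMI);
+ *   pread(fd, buf, sizeof(buf), AMDGPU_SYS_REG_STATE_PCIE);
+ */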
+
+int amdgpu_reg_state_sysfs_init(struct amdgpu_device *adev)
+{
+ int ret;
+
+ if (!amdgpu_asic_get_reg_state_supported(adev))
+ return 0;
+
+ ret = sysfs_create_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
+
+ return ret;
+}
+
+void amdgpu_reg_state_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_asic_get_reg_state_supported(adev))
+ return;
+ sysfs_remove_bin_file(&adev->dev->kobj, &bin_attr_reg_state);
+}
+
+int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block)
+{
+ int r;
+
+ if (ip_block->version->funcs->suspend) {
+ r = ip_block->version->funcs->suspend(ip_block);
+ if (r) {
+ dev_err(ip_block->adev->dev,
+ "suspend of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
+ return r;
+ }
+ }
+
+ ip_block->status.hw = false;
+ return 0;
+}
+
+int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block)
+{
+ int r;
+
+ if (ip_block->version->funcs->resume) {
+ r = ip_block->version->funcs->resume(ip_block);
+ if (r) {
+ dev_err(ip_block->adev->dev,
+ "resume of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
+ return r;
+ }
+ }
+
+ ip_block->status.hw = true;
+ return 0;
+}
+
+/**
+ * DOC: board_info
+ *
+ * The amdgpu driver provides a sysfs API for reporting board-related
+ * information. It provides the form factor information in the format
+ *
+ * type : form factor
+ *
+ * Possible form factor values
+ *
+ * - "cem" - PCIE CEM card
+ * - "oam" - Open Compute Accelerator Module
+ * - "unknown" - Not known
+ *
+ */
+
+static ssize_t amdgpu_device_get_board_info(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ enum amdgpu_pkg_type pkg_type = AMDGPU_PKG_TYPE_CEM;
+ const char *pkg;
+
+ if (adev->smuio.funcs && adev->smuio.funcs->get_pkg_type)
+ pkg_type = adev->smuio.funcs->get_pkg_type(adev);
+
+ switch (pkg_type) {
+ case AMDGPU_PKG_TYPE_CEM:
+ pkg = "cem";
+ break;
+ case AMDGPU_PKG_TYPE_OAM:
+ pkg = "oam";
+ break;
+ default:
+ pkg = "unknown";
+ break;
+ }
+
+ return sysfs_emit(buf, "%s : %s\n", "type", pkg);
+}
+
+static DEVICE_ATTR(board_info, 0444, amdgpu_device_get_board_info, NULL);
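+
+/*
+ * Reading the attribute returns a single line in the documented
+ * "type : form factor" format, for example (illustrative):
+ *
+ *   $ cat /sys/class/drm/card0/device/board_info
+ *   type : oam
+ */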
+
+static struct attribute *amdgpu_board_attrs[] = {
+ &dev_attr_board_info.attr,
+ NULL,
+};
+
+static umode_t amdgpu_board_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (adev->flags & AMD_IS_APU)
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group amdgpu_board_attrs_group = {
+ .attrs = amdgpu_board_attrs,
+ .is_visible = amdgpu_board_attrs_is_visible
+};
+
+static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns true if the device is a dGPU with ATPX power control,
+ * otherwise return false.
+ */
+bool amdgpu_device_supports_px(struct amdgpu_device *adev)
+{
+ if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
+ return true;
+ return false;
+}
+
+/**
+ * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns true if the device is a dGPU with ACPI power control,
+ * otherwise return false.
+ */
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = dev->dev_private;
+ if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+ return false;
- if (adev->flags & AMD_IS_PX)
+ if (adev->has_pr3 ||
+ ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
return true;
return false;
}
+/**
+ * amdgpu_device_supports_baco - Does the device support BACO
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Return:
+ * 1 if the device supports BACO;
+ * 3 if the device supports MACO (only works if BACO is supported)
+ * otherwise return 0.
+ */
+int amdgpu_device_supports_baco(struct amdgpu_device *adev)
+{
+ return amdgpu_asic_supports_baco(adev);
+}
+
+void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
+{
+ int bamaco_support;
+
+ adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
+ bamaco_support = amdgpu_device_supports_baco(adev);
+
+ switch (amdgpu_runtime_pm) {
+ case 2:
+ if (bamaco_support & MACO_SUPPORT) {
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
+ dev_info(adev->dev, "Forcing BAMACO for runtime pm\n");
+ } else if (bamaco_support == BACO_SUPPORT) {
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+ dev_info(adev->dev, "Requested mode BAMACO not available, falling back to BACO\n");
+ }
+ break;
+ case 1:
+ if (bamaco_support & BACO_SUPPORT) {
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+ dev_info(adev->dev, "Forcing BACO for runtime pm\n");
+ }
+ break;
+ case -1:
+ case -2:
+ if (amdgpu_device_supports_px(adev)) {
+ /* enable PX as runtime mode */
+ adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
+ dev_info(adev->dev, "Using ATPX for runtime pm\n");
+ } else if (amdgpu_device_supports_boco(adev)) {
+ /* enable boco as runtime mode */
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
+ dev_info(adev->dev, "Using BOCO for runtime pm\n");
+ } else {
+ if (!bamaco_support)
+ goto no_runtime_pm;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+ /* BACO is not supported on vega20 and arcturus */
+ break;
+ case CHIP_VEGA10:
+ /* enable BACO as runpm mode if noretry=0 */
+ if (!adev->gmc.noretry && !amdgpu_passthrough(adev))
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+ break;
+ default:
+ /* enable BACO as runpm mode on CI+ */
+ if (!amdgpu_passthrough(adev))
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BACO;
+ break;
+ }
+
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) {
+ if (bamaco_support & MACO_SUPPORT) {
+ adev->pm.rpm_mode = AMDGPU_RUNPM_BAMACO;
+ dev_info(adev->dev, "Using BAMACO for runtime pm\n");
+ } else {
+ dev_info(adev->dev, "Using BACO for runtime pm\n");
+ }
+ }
+ }
+ break;
+ case 0:
+ dev_info(adev->dev, "runtime pm is manually disabled\n");
+ break;
+ default:
+ break;
+ }
+
+no_runtime_pm:
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
+ dev_info(adev->dev, "Runtime PM not available\n");
+}
+/**
+ * amdgpu_device_supports_smart_shift - Is the device dGPU with
+ * smart shift support
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns true if the device is a dGPU with Smart Shift support,
+ * otherwise returns false.
+ */
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev)
+{
+ return (amdgpu_device_supports_boco(adev) &&
+ amdgpu_acpi_is_power_shift_control_supported());
+}
+
/*
- * MMIO register access helper functions.
+ * VRAM access helper functions
*/
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
- uint32_t acc_flags)
+
+/**
+ * amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
+ *
+ * @adev: amdgpu_device pointer
+ * @pos: offset of the buffer in vram
+ * @buf: virtual address of the buffer in system memory
+ * @size: read/write size, sizeof(@buf) must be >= @size
+ * @write: true - write to vram, otherwise - read from vram
+ */
+void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
+ void *buf, size_t size, bool write)
{
- uint32_t ret;
+ unsigned long flags;
+ uint32_t hi = ~0, tmp = 0;
+ uint32_t *data = buf;
+ uint64_t last;
+ int idx;
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return;
+
+ BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
- BUG_ON(in_interrupt());
- return amdgpu_virt_kiq_rreg(adev, reg);
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ for (last = pos + size; pos < last; pos += 4) {
+ tmp = pos >> 31;
+
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
+ if (tmp != hi) {
+ WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+ hi = tmp;
+ }
+ if (write)
+ WREG32_NO_KIQ(mmMM_DATA, *data++);
+ else
+ *data++ = RREG32_NO_KIQ(mmMM_DATA);
}
- if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
- ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ drm_dev_exit(idx);
+}
+
+/**
+ * amdgpu_device_aper_access - access vram by vram aperture
+ *
+ * @adev: amdgpu_device pointer
+ * @pos: offset of the buffer in vram
+ * @buf: virtual address of the buffer in system memory
+ * @size: read/write size, sizeof(@buf) must be >= @size
+ * @write: true - write to vram, otherwise - read from vram
+ *
+ * The return value means how many bytes have been transferred.
+ */
+size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
+ void *buf, size_t size, bool write)
+{
+#ifdef CONFIG_64BIT
+ void __iomem *addr;
+ size_t count = 0;
+ uint64_t last;
+
+ if (!adev->mman.aper_base_kaddr)
+ return 0;
+
+ last = min(pos + size, adev->gmc.visible_vram_size);
+ if (last > pos) {
+ addr = adev->mman.aper_base_kaddr + pos;
+ count = last - pos;
+
+ if (write) {
+ memcpy_toio(addr, buf, count);
+ /* Make sure HDP write cache flush happens without any reordering
+ * after the system memory contents are sent over PCIe device
+ */
+ mb();
+ amdgpu_device_flush_hdp(adev, NULL);
+ } else {
+ amdgpu_device_invalidate_hdp(adev, NULL);
+ /* Make sure HDP read cache is invalidated before issuing a read
+ * to the PCIe device
+ */
+ mb();
+ memcpy_fromio(buf, addr, count);
+ }
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
- trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
- return ret;
+
+ return count;
+#else
+ return 0;
+#endif
}
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags)
+/**
+ * amdgpu_device_vram_access - read/write a buffer in vram
+ *
+ * @adev: amdgpu_device pointer
+ * @pos: offset of the buffer in vram
+ * @buf: virtual address of the buffer in system memory
+ * @size: read/write size, sizeof(@buf) must be >= @size
+ * @write: true - write to vram, otherwise - read from vram
+ */
+void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
+ void *buf, size_t size, bool write)
{
- trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+ size_t count;
- if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) {
- BUG_ON(in_interrupt());
- return amdgpu_virt_kiq_wreg(adev, reg, v);
+	/* try to use the vram aperture to access vram first */
+ count = amdgpu_device_aper_access(adev, pos, buf, size, write);
+ size -= count;
+ if (size) {
+		/* use MM to access the rest of vram */
+ pos += count;
+ buf += count;
+ amdgpu_device_mm_access(adev, pos, buf, size, write);
}
+}
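/*
 * Illustrative sketch, not part of this patch: a hypothetical helper that
 * copies a system memory buffer into VRAM. amdgpu_device_vram_access()
 * prefers the CPU-visible aperture and transparently falls back to
 * MM_INDEX/MM_DATA for whatever does not fit, so callers need not care
 * which path is taken. Assumes pos and size are 4-byte aligned.
 */
static void example_copy_to_vram(struct amdgpu_device *adev, loff_t pos,
				 void *buf, size_t size)
{
	amdgpu_device_vram_access(adev, pos, buf, size, true);
}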
- if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
- writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
+/*
+ * register access helper functions.
+ */
+
+/* Check if hw access should be skipped because of hotplug or device error */
+bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
+{
+ if (adev->no_hw_access)
+ return true;
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+#ifdef CONFIG_LOCKDEP
+ /*
+ * This is a bit complicated to understand, so worth a comment. What we assert
+ * here is that the GPU reset is not running on another thread in parallel.
+ *
+ * For this we trylock the read side of the reset semaphore, if that succeeds
+ * we know that the reset is not running in parallel.
+ *
+ * If the trylock fails we assert that we are either already holding the read
+ * side of the lock or are the reset thread itself and hold the write side of
+ * the lock.
+ */
+ if (in_task()) {
+ if (down_read_trylock(&adev->reset_domain->sem))
+ up_read(&adev->reset_domain->sem);
+ else
+ lockdep_assert_held(&adev->reset_domain->sem);
}
+#endif
+ return false;
}
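/*
 * Illustrative sketch, not part of this patch: the lockdep idiom used above,
 * reduced to its core with a hypothetical rwsem. If the read side can be
 * trylocked, no writer (the reset thread in the code above) is running; if it
 * cannot, the current context must already hold the semaphore in some mode.
 */
static void example_assert_no_concurrent_writer(struct rw_semaphore *sem)
{
	if (down_read_trylock(sem))
		up_read(sem);
	else
		lockdep_assert_held(sem);
}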
-u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
+/**
+ * amdgpu_device_rreg - read a memory mapped IO or indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: dword aligned register offset
+ * @acc_flags: access flags which require special behavior
+ *
+ * Returns the 32 bit value from the offset specified.
+ */
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t acc_flags)
{
- if ((reg * 4) < adev->rio_mem_size)
- return ioread32(adev->rio_mem + (reg * 4));
- else {
- iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
- return ioread32(adev->rio_mem + (mmMM_DATA * 4));
+ uint32_t ret;
+
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
+ if ((reg * 4) < adev->rmmio_size) {
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+ amdgpu_sriov_runtime(adev) &&
+ down_read_trylock(&adev->reset_domain->sem)) {
+ ret = amdgpu_kiq_rreg(adev, reg, 0);
+ up_read(&adev->reset_domain->sem);
+ } else {
+ ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
+ }
+ } else {
+ ret = adev->pcie_rreg(adev, reg * 4);
}
+
+ trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
+
+ return ret;
+}
+
+/*
+ * Byte-sized MMIO register read helper
+ * @offset: byte offset from MMIO start
+ */
+
+/**
+ * amdgpu_mm_rreg8 - read a memory mapped IO register
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: byte aligned register offset
+ *
+ * Returns the 8 bit value from the offset specified.
+ */
+uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
+{
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
+ if (offset < adev->rmmio_size)
+ return (readb(adev->rmmio + offset));
+ BUG();
}
-void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
+
+/**
+ * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: dword aligned register offset
+ * @acc_flags: access flags which require special behavior
+ * @xcc_id: xcc accelerated compute core id
+ *
+ * Returns the 32 bit value from the offset specified.
+ */
+uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t acc_flags,
+ uint32_t xcc_id)
{
+ uint32_t ret, rlcg_flag;
- if ((reg * 4) < adev->rio_mem_size)
- iowrite32(v, adev->rio_mem + (reg * 4));
- else {
- iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
- iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
+ if ((reg * 4) < adev->rmmio_size) {
+ if (amdgpu_sriov_vf(adev) &&
+ !amdgpu_sriov_runtime(adev) &&
+ adev->gfx.rlc.rlcg_reg_access_supported &&
+ amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
+ GC_HWIP, false,
+ &rlcg_flag)) {
+ ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, GET_INST(GC, xcc_id));
+ } else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+ amdgpu_sriov_runtime(adev) &&
+ down_read_trylock(&adev->reset_domain->sem)) {
+ ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
+ up_read(&adev->reset_domain->sem);
+ } else {
+ ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
+ }
+ } else {
+ ret = adev->pcie_rreg(adev, reg * 4);
}
+
+ return ret;
}
+/*
+ * Byte-sized MMIO register write helper
+ * @offset: byte offset from MMIO start
+ * @value: the value to be written to the register
+ */
+
/**
- * amdgpu_mm_rdoorbell - read a doorbell dword
+ * amdgpu_mm_wreg8 - write a memory mapped IO register
*
* @adev: amdgpu_device pointer
- * @index: doorbell index
+ * @offset: byte aligned register offset
+ * @value: 8 bit value to write
*
- * Returns the value in the doorbell aperture at the
- * requested doorbell index (CIK).
+ * Writes the value specified to the offset specified.
*/
-u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
+void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
- if (index < adev->doorbell.num_doorbells) {
- return readl(adev->doorbell.ptr + index);
+ if (amdgpu_device_skip_hw_access(adev))
+ return;
+
+ if (offset < adev->rmmio_size)
+ writeb(value, adev->rmmio + offset);
+ else
+ BUG();
+}
+
+/**
+ * amdgpu_device_wreg - write to a memory mapped IO or indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: dword aligned register offset
+ * @v: 32 bit value to write to the register
+ * @acc_flags: access flags which require special behavior
+ *
+ * Writes the value specified to the offset specified.
+ */
+void amdgpu_device_wreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t v,
+ uint32_t acc_flags)
+{
+ if (amdgpu_device_skip_hw_access(adev))
+ return;
+
+ if ((reg * 4) < adev->rmmio_size) {
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+ amdgpu_sriov_runtime(adev) &&
+ down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_kiq_wreg(adev, reg, v, 0);
+ up_read(&adev->reset_domain->sem);
+ } else {
+ writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+ }
} else {
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
- return 0;
+ adev->pcie_wreg(adev, reg * 4, v);
}
+
+ trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}
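/*
 * Illustrative sketch, not part of this patch: a hypothetical read-modify-write
 * helper built on the accessors above. Depending on the offset and SR-IOV
 * state the accessors route through direct MMIO, the KIQ, or the PCIE
 * indirect path, but the calling convention stays the same.
 */
static void example_rmw_reg(struct amdgpu_device *adev, uint32_t reg,
			    uint32_t clear_mask, uint32_t set_bits)
{
	uint32_t val;

	val = amdgpu_device_rreg(adev, reg, 0);
	val &= ~clear_mask;
	val |= set_bits;
	amdgpu_device_wreg(adev, reg, val, 0);
}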
/**
- * amdgpu_mm_wdoorbell - write a doorbell dword
+ * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
*
* @adev: amdgpu_device pointer
- * @index: doorbell index
+ * @reg: mmio/rlc register
* @v: value to write
+ * @xcc_id: xcc accelerated compute core id
*
- * Writes @v to the doorbell aperture at the
- * requested doorbell index (CIK).
+ * This function is invoked only for debugfs register access.
*/
-void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t v,
+ uint32_t xcc_id)
{
- if (index < adev->doorbell.num_doorbells) {
- writel(v, adev->doorbell.ptr + index);
+ if (amdgpu_device_skip_hw_access(adev))
+ return;
+
+ if (amdgpu_sriov_fullaccess(adev) &&
+ adev->gfx.rlc.funcs &&
+ adev->gfx.rlc.funcs->is_rlcg_access_range) {
+ if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
+ return amdgpu_sriov_wreg(adev, reg, v, 0, 0, xcc_id);
+ } else if ((reg * 4) >= adev->rmmio_size) {
+ adev->pcie_wreg(adev, reg * 4, v);
} else {
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+ writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}
}
/**
- * amdgpu_mm_rdoorbell64 - read a doorbell Qword
+ * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
*
* @adev: amdgpu_device pointer
- * @index: doorbell index
+ * @reg: dword aligned register offset
+ * @v: 32 bit value to write to the register
+ * @acc_flags: access flags which require special behavior
+ * @xcc_id: xcc accelerated compute core id
*
- * Returns the value in the doorbell aperture at the
- * requested doorbell index (VEGA10+).
+ * Writes the value specified to the offset specified.
*/
-u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
+void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t v,
+ uint32_t acc_flags, uint32_t xcc_id)
{
- if (index < adev->doorbell.num_doorbells) {
- return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
+ uint32_t rlcg_flag;
+
+ if (amdgpu_device_skip_hw_access(adev))
+ return;
+
+ if ((reg * 4) < adev->rmmio_size) {
+ if (amdgpu_sriov_vf(adev) &&
+ !amdgpu_sriov_runtime(adev) &&
+ adev->gfx.rlc.rlcg_reg_access_supported &&
+ amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
+ GC_HWIP, true,
+ &rlcg_flag)) {
+ amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, GET_INST(GC, xcc_id));
+ } else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+ amdgpu_sriov_runtime(adev) &&
+ down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_kiq_wreg(adev, reg, v, xcc_id);
+ up_read(&adev->reset_domain->sem);
+ } else {
+ writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+ }
} else {
- DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
- return 0;
+ adev->pcie_wreg(adev, reg * 4, v);
}
}
/**
- * amdgpu_mm_wdoorbell64 - write a doorbell Qword
+ * amdgpu_device_indirect_rreg - read an indirect register
*
* @adev: amdgpu_device pointer
- * @index: doorbell index
- * @v: value to write
+ * @reg_addr: indirect register address to read from
*
- * Writes @v to the doorbell aperture at the
- * requested doorbell index (VEGA10+).
+ * Returns the value of indirect register @reg_addr
*/
-void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
+u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
+ u32 reg_addr)
{
- if (index < adev->doorbell.num_doorbells) {
- atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
+ unsigned long flags, pcie_index, pcie_data;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_data_offset;
+ u32 r;
+
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ r = readl(pcie_data_offset);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+ return r;
+}
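/*
 * Illustrative sketch, not part of this patch: the generic index/data
 * register-pair protocol the function above implements, with hypothetical
 * offsets. The target address is written to the index register, the posted
 * write is flushed with a dummy read, and the value is then read back via the
 * data register. The real helper additionally serializes with pcie_idx_lock.
 */
static u32 example_index_data_read(void __iomem *mmio, u32 index_off,
				   u32 data_off, u32 reg_addr)
{
	writel(reg_addr, mmio + index_off);
	readl(mmio + index_off);	/* flush the posted write */
	return readl(mmio + data_off);
}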
+
+u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
+ u64 reg_addr)
+{
+ unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
+ u32 r;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_index_hi_offset;
+ void __iomem *pcie_data_offset;
+
+ if (unlikely(!adev->nbio.funcs)) {
+ pcie_index = AMDGPU_PCIE_INDEX_FALLBACK;
+ pcie_data = AMDGPU_PCIE_DATA_FALLBACK;
} else {
- DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+ }
+
+ if (reg_addr >> 32) {
+ if (unlikely(!adev->nbio.funcs))
+ pcie_index_hi = AMDGPU_PCIE_INDEX_HI_FALLBACK;
+ else
+ pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
+ } else {
+ pcie_index_hi = 0;
+ }
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+ if (pcie_index_hi != 0)
+ pcie_index_hi_offset = (void __iomem *)adev->rmmio +
+ pcie_index_hi * 4;
+
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ if (pcie_index_hi != 0) {
+ writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
+ readl(pcie_index_hi_offset);
+ }
+ r = readl(pcie_data_offset);
+
+ /* clear the high bits */
+ if (pcie_index_hi != 0) {
+ writel(0, pcie_index_hi_offset);
+ readl(pcie_index_hi_offset);
+ }
+
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+ return r;
+}
+
+/**
+ * amdgpu_device_indirect_rreg64 - read a 64-bit indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @reg_addr: indirect register address to read from
+ *
+ * Returns the value of indirect register @reg_addr
+ */
+u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
+ u32 reg_addr)
+{
+ unsigned long flags, pcie_index, pcie_data;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_data_offset;
+ u64 r;
+
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+ /* read low 32 bits */
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ r = readl(pcie_data_offset);
+ /* read high 32 bits */
+ writel(reg_addr + 4, pcie_index_offset);
+ readl(pcie_index_offset);
+ r |= ((u64)readl(pcie_data_offset) << 32);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+ return r;
+}
+
+u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev,
+ u64 reg_addr)
+{
+ unsigned long flags, pcie_index, pcie_data;
+ unsigned long pcie_index_hi = 0;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_index_hi_offset;
+ void __iomem *pcie_data_offset;
+ u64 r;
+
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+ if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
+ pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+ if (pcie_index_hi != 0)
+ pcie_index_hi_offset = (void __iomem *)adev->rmmio +
+ pcie_index_hi * 4;
+
+ /* read low 32 bits */
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ if (pcie_index_hi != 0) {
+ writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
+ readl(pcie_index_hi_offset);
+ }
+ r = readl(pcie_data_offset);
+ /* read high 32 bits */
+ writel(reg_addr + 4, pcie_index_offset);
+ readl(pcie_index_offset);
+ if (pcie_index_hi != 0) {
+ writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
+ readl(pcie_index_hi_offset);
+ }
+ r |= ((u64)readl(pcie_data_offset) << 32);
+
+ /* clear the high bits */
+ if (pcie_index_hi != 0) {
+ writel(0, pcie_index_hi_offset);
+ readl(pcie_index_hi_offset);
+ }
+
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+ return r;
+}
+
+/**
+ * amdgpu_device_indirect_wreg - write to an indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @reg_addr: indirect register offset
+ * @reg_data: indirect register data
+ *
+ */
+void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
+ u32 reg_addr, u32 reg_data)
+{
+ unsigned long flags, pcie_index, pcie_data;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_data_offset;
+
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ writel(reg_data, pcie_data_offset);
+ readl(pcie_data_offset);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
+ u64 reg_addr, u32 reg_data)
+{
+ unsigned long flags, pcie_index, pcie_index_hi, pcie_data;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_index_hi_offset;
+ void __iomem *pcie_data_offset;
+
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+ if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
+ pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
+ else
+ pcie_index_hi = 0;
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+ if (pcie_index_hi != 0)
+ pcie_index_hi_offset = (void __iomem *)adev->rmmio +
+ pcie_index_hi * 4;
+
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ if (pcie_index_hi != 0) {
+ writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
+ readl(pcie_index_hi_offset);
+ }
+ writel(reg_data, pcie_data_offset);
+ readl(pcie_data_offset);
+
+ /* clear the high bits */
+ if (pcie_index_hi != 0) {
+ writel(0, pcie_index_hi_offset);
+ readl(pcie_index_hi_offset);
}
+
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+/**
+ * amdgpu_device_indirect_wreg64 - write to a 64-bit indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @reg_addr: indirect register offset
+ * @reg_data: indirect register data
+ *
+ */
+void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
+ u32 reg_addr, u64 reg_data)
+{
+ unsigned long flags, pcie_index, pcie_data;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_data_offset;
+
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+ /* write low 32 bits */
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
+ readl(pcie_data_offset);
+ /* write high 32 bits */
+ writel(reg_addr + 4, pcie_index_offset);
+ readl(pcie_index_offset);
+ writel((u32)(reg_data >> 32), pcie_data_offset);
+ readl(pcie_data_offset);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
+ u64 reg_addr, u64 reg_data)
+{
+ unsigned long flags, pcie_index, pcie_data;
+ unsigned long pcie_index_hi = 0;
+ void __iomem *pcie_index_offset;
+ void __iomem *pcie_index_hi_offset;
+ void __iomem *pcie_data_offset;
+
+ pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
+ pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
+ if ((reg_addr >> 32) && (adev->nbio.funcs->get_pcie_index_hi_offset))
+ pcie_index_hi = adev->nbio.funcs->get_pcie_index_hi_offset(adev);
+
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+ pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+ if (pcie_index_hi != 0)
+ pcie_index_hi_offset = (void __iomem *)adev->rmmio +
+ pcie_index_hi * 4;
+
+ /* write low 32 bits */
+ writel(reg_addr, pcie_index_offset);
+ readl(pcie_index_offset);
+ if (pcie_index_hi != 0) {
+ writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
+ readl(pcie_index_hi_offset);
+ }
+ writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
+ readl(pcie_data_offset);
+ /* write high 32 bits */
+ writel(reg_addr + 4, pcie_index_offset);
+ readl(pcie_index_offset);
+ if (pcie_index_hi != 0) {
+ writel((reg_addr >> 32) & 0xff, pcie_index_hi_offset);
+ readl(pcie_index_hi_offset);
+ }
+ writel((u32)(reg_data >> 32), pcie_data_offset);
+ readl(pcie_data_offset);
+
+ /* clear the high bits */
+ if (pcie_index_hi != 0) {
+ writel(0, pcie_index_hi_offset);
+ readl(pcie_index_hi_offset);
+ }
+
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+/**
+ * amdgpu_device_get_rev_id - query device rev_id
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns the device rev_id.
+ */
+u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
+{
+ return adev->nbio.funcs->get_rev_id(adev);
}
/**
* amdgpu_invalid_rreg - dummy reg read function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @reg: offset of register
*
* Dummy register read function. Used for register blocks
@@ -247,7 +1283,14 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
*/
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+ dev_err(adev->dev, "Invalid callback to read register 0x%04X\n", reg);
+ BUG();
+ return 0;
+}
+
+static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
+{
+ dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
BUG();
return 0;
}
@@ -255,7 +1298,7 @@ static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
/**
* amdgpu_invalid_wreg - dummy reg write function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @reg: offset of register
* @v: value to write to the register
*
@@ -264,15 +1307,75 @@ static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
*/
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
- DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
- reg, v);
+ dev_err(adev->dev,
+ "Invalid callback to write register 0x%04X with 0x%08X\n", reg,
+ v);
+ BUG();
+}
+
+static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
+{
+ dev_err(adev->dev,
+ "Invalid callback to write register 0x%llX with 0x%08X\n", reg,
+ v);
+ BUG();
+}
+
+/**
+ * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: offset of register
+ *
+ * Dummy register read function. Used for register blocks
+ * that certain asics don't have (all asics).
+ * Returns the value in the register.
+ */
+static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
+{
+ dev_err(adev->dev, "Invalid callback to read 64 bit register 0x%04X\n",
+ reg);
+ BUG();
+ return 0;
+}
+
+static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
+{
+ dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
+ BUG();
+ return 0;
+}
+
+/**
+ * amdgpu_invalid_wreg64 - dummy reg write function
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: offset of register
+ * @v: value to write to the register
+ *
+ * Dummy register write function. Used for register blocks
+ * that certain asics don't have (all asics).
+ */
+static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
+{
+ dev_err(adev->dev,
+ "Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
+ reg, v);
+ BUG();
+}
+
+static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
+{
+ dev_err(adev->dev,
+ "Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
+ reg, v);
BUG();
}
/**
* amdgpu_block_invalid_rreg - dummy reg read function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @block: offset of instance
* @reg: offset of register
*
@@ -283,8 +1386,9 @@ static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
uint32_t block, uint32_t reg)
{
- DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
- reg, block);
+ dev_err(adev->dev,
+ "Invalid callback to read register 0x%04X in block 0x%04X\n",
+ reg, block);
BUG();
return 0;
}
@@ -292,7 +1396,7 @@ static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
/**
* amdgpu_block_invalid_wreg - dummy reg write function
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu_device pointer
* @block: offset of instance
* @reg: offset of register
* @v: value to write to the register
@@ -304,73 +1408,103 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
uint32_t block,
uint32_t reg, uint32_t v)
{
- DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
- reg, block, v);
+ dev_err(adev->dev,
+ "Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
+ reg, block, v);
BUG();
}
-static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
+static uint32_t amdgpu_device_get_vbios_flags(struct amdgpu_device *adev)
{
- int r;
+ if (hweight32(adev->aid_mask) && (adev->flags & AMD_IS_APU))
+ return AMDGPU_VBIOS_SKIP;
- if (adev->vram_scratch.robj == NULL) {
- r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
- PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->vram_scratch.robj);
- if (r) {
- return r;
- }
- }
+ if (hweight32(adev->aid_mask) && amdgpu_passthrough(adev))
+ return AMDGPU_VBIOS_OPTIONAL;
- r = amdgpu_bo_reserve(adev->vram_scratch.robj, false);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_pin(adev->vram_scratch.robj,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vram_scratch.gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->vram_scratch.robj);
- return r;
+ return 0;
+}
+
+/**
+ * amdgpu_device_asic_init - Wrapper for atom asic_init
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Does any asic-specific work and then calls atom asic init.
+ */
+static int amdgpu_device_asic_init(struct amdgpu_device *adev)
+{
+ uint32_t flags;
+ bool optional;
+ int ret;
+
+ amdgpu_asic_pre_asic_init(adev);
+ flags = amdgpu_device_get_vbios_flags(adev);
+ optional = !!(flags & (AMDGPU_VBIOS_OPTIONAL | AMDGPU_VBIOS_SKIP));
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
+ amdgpu_psp_wait_for_bootloader(adev);
+ if (optional && !adev->bios)
+ return 0;
+
+ ret = amdgpu_atomfirmware_asic_init(adev, true);
+ return ret;
+ } else {
+ if (optional && !adev->bios)
+ return 0;
+
+ return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}
- r = amdgpu_bo_kmap(adev->vram_scratch.robj,
- (void **)&adev->vram_scratch.ptr);
- if (r)
- amdgpu_bo_unpin(adev->vram_scratch.robj);
- amdgpu_bo_unreserve(adev->vram_scratch.robj);
- return r;
+ return 0;
}
-static void amdgpu_vram_scratch_fini(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocates a scratch page of VRAM for use by various things in the
+ * driver.
+ */
+static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
- int r;
+ return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mem_scratch.robj,
+ &adev->mem_scratch.gpu_addr,
+ (void **)&adev->mem_scratch.ptr);
+}
- if (adev->vram_scratch.robj == NULL) {
- return;
- }
- r = amdgpu_bo_reserve(adev->vram_scratch.robj, true);
- if (likely(r == 0)) {
- amdgpu_bo_kunmap(adev->vram_scratch.robj);
- amdgpu_bo_unpin(adev->vram_scratch.robj);
- amdgpu_bo_unreserve(adev->vram_scratch.robj);
- }
- amdgpu_bo_unref(&adev->vram_scratch.robj);
+/**
+ * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Frees the VRAM scratch page.
+ */
+static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}
/**
- * amdgpu_program_register_sequence - program an array of registers.
+ * amdgpu_device_program_register_sequence - program an array of registers.
*
* @adev: amdgpu_device pointer
* @registers: pointer to the register array
* @array_size: size of the register array
*
- * Programs an array or registers with and and or masks.
+ * Programs an array of registers with AND and OR masks.
* This is a helper for setting golden registers.
*/
-void amdgpu_program_register_sequence(struct amdgpu_device *adev,
- const u32 *registers,
- const u32 array_size)
+void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
+ const u32 *registers,
+ const u32 array_size)
{
u32 tmp, reg, and_mask, or_mask;
int i;
@@ -378,7 +1512,7 @@ void amdgpu_program_register_sequence(struct amdgpu_device *adev,
if (array_size % 3)
return;
- for (i = 0; i < array_size; i +=3) {
+ for (i = 0; i < array_size; i += 3) {
reg = registers[i + 0];
and_mask = registers[i + 1];
or_mask = registers[i + 2];
@@ -388,110 +1522,55 @@ void amdgpu_program_register_sequence(struct amdgpu_device *adev,
} else {
tmp = RREG32(reg);
tmp &= ~and_mask;
- tmp |= or_mask;
+ if (adev->family >= AMDGPU_FAMILY_AI)
+ tmp |= (or_mask & and_mask);
+ else
+ tmp |= or_mask;
}
WREG32(reg, tmp);
}
}
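/*
 * Illustrative sketch, not part of this patch: how a golden-register table for
 * amdgpu_device_program_register_sequence() is laid out. Each entry is a
 * {register, and_mask, or_mask} triplet; an and_mask of 0xffffffff means the
 * or_mask is written directly. The offsets and values below are made up.
 */
static const u32 example_golden_settings[] = {
	/* reg,    and_mask,   or_mask */
	0x1234, 0xffffffff, 0x00000001,	/* direct write */
	0x5678, 0x0000000f, 0x00000002,	/* clear the low nibble, set bit 1 */
};

/*
 * usage:
 *	amdgpu_device_program_register_sequence(adev, example_golden_settings,
 *						ARRAY_SIZE(example_golden_settings));
 */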
-void amdgpu_pci_config_reset(struct amdgpu_device *adev)
-{
- pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
-}
-
-/*
- * GPU doorbell aperture helpers function.
- */
/**
- * amdgpu_doorbell_init - Init doorbell driver information.
+ * amdgpu_device_pci_config_reset - reset the GPU
*
* @adev: amdgpu_device pointer
*
- * Init doorbell driver information (CIK)
- * Returns 0 on success, error on failure.
+ * Resets the GPU using the pci config reset sequence.
+ * Only applicable to asics prior to vega10.
*/
-static int amdgpu_doorbell_init(struct amdgpu_device *adev)
+void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
- /* doorbell bar mapping */
- adev->doorbell.base = pci_resource_start(adev->pdev, 2);
- adev->doorbell.size = pci_resource_len(adev->pdev, 2);
-
- adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
- AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
- if (adev->doorbell.num_doorbells == 0)
- return -EINVAL;
-
- adev->doorbell.ptr = ioremap(adev->doorbell.base,
- adev->doorbell.num_doorbells *
- sizeof(u32));
- if (adev->doorbell.ptr == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-/**
- * amdgpu_doorbell_fini - Tear down doorbell driver information.
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down doorbell driver information (CIK)
- */
-static void amdgpu_doorbell_fini(struct amdgpu_device *adev)
-{
- iounmap(adev->doorbell.ptr);
- adev->doorbell.ptr = NULL;
+ pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}
/**
- * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
- * setup amdkfd
+ * amdgpu_device_pci_reset - reset the GPU using generic PCI means
*
* @adev: amdgpu_device pointer
- * @aperture_base: output returning doorbell aperture base physical address
- * @aperture_size: output returning doorbell aperture size in bytes
- * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
*
- * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
- * takes doorbells required for its own rings and reports the setup to amdkfd.
- * amdgpu reserved doorbells are at the start of the doorbell aperture.
+ * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
*/
-void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
- phys_addr_t *aperture_base,
- size_t *aperture_size,
- size_t *start_offset)
+int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
- /*
- * The first num_doorbells are used by amdgpu.
- * amdkfd takes whatever's left in the aperture.
- */
- if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
- *aperture_base = adev->doorbell.base;
- *aperture_size = adev->doorbell.size;
- *start_offset = adev->doorbell.num_doorbells * sizeof(u32);
- } else {
- *aperture_base = 0;
- *aperture_size = 0;
- *start_offset = 0;
- }
+ return pci_reset_function(adev->pdev);
}
/*
- * amdgpu_wb_*()
- * Writeback is the the method by which the the GPU updates special pages
- * in memory with the status of certain GPU events (fences, ring pointers,
- * etc.).
+ * amdgpu_device_wb_*()
+ * Writeback is the method by which the GPU updates special pages in memory
+ * with the status of certain GPU events (fences, ring pointers, etc.).
*/
/**
- * amdgpu_wb_fini - Disable Writeback and free memory
+ * amdgpu_device_wb_fini - Disable Writeback and free memory
*
* @adev: amdgpu_device pointer
*
* Disables Writeback and frees the Writeback memory (all asics).
* Used at driver shutdown.
*/
-static void amdgpu_wb_fini(struct amdgpu_device *adev)
+static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
if (adev->wb.wb_obj) {
amdgpu_bo_free_kernel(&adev->wb.wb_obj,
@@ -502,20 +1581,21 @@ static void amdgpu_wb_fini(struct amdgpu_device *adev)
}
/**
- * amdgpu_wb_init- Init Writeback driver info and allocate memory
+ * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
*
* @adev: amdgpu_device pointer
*
- * Disables Writeback and frees the Writeback memory (all asics).
+ * Initializes writeback and allocates writeback memory (all asics).
* Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
*/
-static int amdgpu_wb_init(struct amdgpu_device *adev)
+static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
int r;
if (adev->wb.wb_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t),
+ /* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
&adev->wb.wb_obj, &adev->wb.gpu_addr,
(void **)&adev->wb.wb);
@@ -528,14 +1608,14 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
memset(&adev->wb.used, 0, sizeof(adev->wb.used));
/* clear wb memory */
- memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t));
+ memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
}
return 0;
}
/**
- * amdgpu_wb_get - Allocate a wb entry
+ * amdgpu_device_wb_get - Allocate a wb entry
*
* @adev: amdgpu_device pointer
* @wb: wb index
@@ -543,162 +1623,145 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
* Allocate a wb slot for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
-int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb)
+int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
- unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
- if (offset < adev->wb.num_wb) {
- __set_bit(offset, adev->wb.used);
- *wb = offset;
- return 0;
- } else {
- return -EINVAL;
- }
-}
+ unsigned long flags, offset;
-/**
- * amdgpu_wb_get_64bit - Allocate a wb entry
- *
- * @adev: amdgpu_device pointer
- * @wb: wb index
- *
- * Allocate a wb slot for use by the driver (all asics).
- * Returns 0 on success or -EINVAL on failure.
- */
-int amdgpu_wb_get_64bit(struct amdgpu_device *adev, u32 *wb)
-{
- unsigned long offset = bitmap_find_next_zero_area_off(adev->wb.used,
- adev->wb.num_wb, 0, 2, 7, 0);
- if ((offset + 1) < adev->wb.num_wb) {
+ spin_lock_irqsave(&adev->wb.lock, flags);
+ offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
+ if (offset < adev->wb.num_wb) {
__set_bit(offset, adev->wb.used);
- __set_bit(offset + 1, adev->wb.used);
- *wb = offset;
+ spin_unlock_irqrestore(&adev->wb.lock, flags);
+ *wb = offset << 3; /* convert to dw offset */
return 0;
} else {
+ spin_unlock_irqrestore(&adev->wb.lock, flags);
return -EINVAL;
}
}
/**
- * amdgpu_wb_free - Free a wb entry
+ * amdgpu_device_wb_free - Free a wb entry
*
* @adev: amdgpu_device pointer
* @wb: wb index
*
* Free a wb slot allocated for use by the driver (all asics)
*/
-void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb)
+void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
+ unsigned long flags;
+
+ wb >>= 3;
+ spin_lock_irqsave(&adev->wb.lock, flags);
if (wb < adev->wb.num_wb)
__clear_bit(wb, adev->wb.used);
+ spin_unlock_irqrestore(&adev->wb.lock, flags);
}
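/*
 * Illustrative sketch, not part of this patch: typical lifetime of a
 * writeback slot. amdgpu_device_wb_get() returns a dword offset into the
 * writeback page; adev->wb.gpu_addr + offset * 4 is the address the GPU
 * writes to and adev->wb.wb[offset] is the CPU-visible copy of the same slot.
 */
static int example_use_wb_slot(struct amdgpu_device *adev)
{
	u64 gpu_addr;
	u32 wb;
	int r;

	r = amdgpu_device_wb_get(adev, &wb);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (u64)wb * 4;
	adev->wb.wb[wb] = 0;	/* clear the CPU side of the slot */

	/* ... point a ring or fence at gpu_addr and let the GPU update it ... */

	amdgpu_device_wb_free(adev, wb);
	return 0;
}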
/**
- * amdgpu_wb_free_64bit - Free a wb entry
+ * amdgpu_device_resize_fb_bar - try to resize FB BAR
*
* @adev: amdgpu_device pointer
- * @wb: wb index
*
- * Free a wb slot allocated for use by the driver (all asics)
+ * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
+ * to fail, but if any of the BARs is not accessible after the resize we abort
+ * driver loading by returning -ENODEV.
*/
-void amdgpu_wb_free_64bit(struct amdgpu_device *adev, u32 wb)
+int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
- if ((wb + 1) < adev->wb.num_wb) {
- __clear_bit(wb, adev->wb.used);
- __clear_bit(wb + 1, adev->wb.used);
- }
-}
+ int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
+ struct pci_bus *root;
+ struct resource *res;
+ unsigned int i;
+ u16 cmd;
+ int r;
-/**
- * amdgpu_vram_location - try to find VRAM location
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
- * @base: base address at which to put VRAM
- *
- * Function will place try to place VRAM at base address provided
- * as parameter (which is so far either PCI aperture address or
- * for IGP TOM base address).
- *
- * If there is not enough space to fit the unvisible VRAM in the 32bits
- * address space then we limit the VRAM size to the aperture.
- *
- * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
- * this shouldn't be a problem as we are using the PCI aperture as a reference.
- * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
- * not IGP.
- *
- * Note: we use mc_vram_size as on some board we need to program the mc to
- * cover the whole aperture even if VRAM size is inferior to aperture size
- * Novell bug 204882 + along with lots of ubuntu ones
- *
- * Note: when limiting vram it's safe to overwritte real_vram_size because
- * we are not in case where real_vram_size is inferior to mc_vram_size (ie
- * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
- * ones)
- *
- * Note: IGP TOM addr should be the same as the aperture addr, we don't
- * explicitly check for that thought.
- *
- * FIXME: when reducing VRAM size align new size on power of 2.
- */
-void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base)
-{
- uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
+ if (!IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT))
+ return 0;
- mc->vram_start = base;
- if (mc->mc_vram_size > (adev->mc.mc_mask - base + 1)) {
- dev_warn(adev->dev, "limiting VRAM to PCI aperture size\n");
- mc->real_vram_size = mc->aper_size;
- mc->mc_vram_size = mc->aper_size;
- }
- mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- if (limit && limit < mc->real_vram_size)
- mc->real_vram_size = limit;
- dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
- mc->mc_vram_size >> 20, mc->vram_start,
- mc->vram_end, mc->real_vram_size >> 20);
-}
+ /* Bypass for VF */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
-/**
- * amdgpu_gtt_location - try to find GTT location
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
- *
- * Function will place try to place GTT before or after VRAM.
- *
- * If GTT size is bigger than space left then we ajust GTT size.
- * Thus function will never fails.
- *
- * FIXME: when reducing GTT size align new size on power of 2.
- */
-void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
-{
- u64 size_af, size_bf;
+ if (!amdgpu_rebar)
+ return 0;
- size_af = ((adev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
- size_bf = mc->vram_start & ~mc->gtt_base_align;
- if (size_bf > size_af) {
- if (mc->gtt_size > size_bf) {
- dev_warn(adev->dev, "limiting GTT\n");
- mc->gtt_size = size_bf;
- }
- mc->gtt_start = 0;
- } else {
- if (mc->gtt_size > size_af) {
- dev_warn(adev->dev, "limiting GTT\n");
- mc->gtt_size = size_af;
- }
- mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
+ /* resizing on Dell G5 SE platforms causes problems with runtime pm */
+ if ((amdgpu_runtime_pm != 0) &&
+ adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
+ adev->pdev->device == 0x731f &&
+ adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
+ return 0;
+
+ /* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
+ if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
+ dev_warn(
+ adev->dev,
+ "System can't access extended configuration space, please check!!\n");
+
+ /* skip if the bios has already enabled large BAR */
+ if (adev->gmc.real_vram_size &&
+ (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
+ return 0;
+
+ /* Check if the root BUS has 64bit memory resources */
+ root = adev->pdev->bus;
+ while (root->parent)
+ root = root->parent;
+
+ pci_bus_for_each_resource(root, res, i) {
+ if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
+ res->start > 0x100000000ull)
+ break;
}
- mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
- dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
- mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
+
+ /* Trying to resize is pointless without a root hub window above 4GB */
+ if (!res)
+ return 0;
+
+ /* Limit the BAR size to what is available */
+ rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
+ rbar_size);
+
+ /* Disable memory decoding while we change the BAR addresses and size */
+ pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
+ pci_write_config_word(adev->pdev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_MEMORY);
+
+ /* Free the VRAM and doorbell BAR, we most likely need to move both. */
+ amdgpu_doorbell_fini(adev);
+ if (adev->asic_type >= CHIP_BONAIRE)
+ pci_release_resource(adev->pdev, 2);
+
+ pci_release_resource(adev->pdev, 0);
+
+ r = pci_resize_resource(adev->pdev, 0, rbar_size);
+ if (r == -ENOSPC)
+ dev_info(adev->dev,
+ "Not enough PCI address space for a large BAR.");
+ else if (r && r != -ENOTSUPP)
+ dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);
+
+ pci_assign_unassigned_bus_resources(adev->pdev->bus);
+
+ /* When the doorbell or fb BAR isn't available we have no chance of
+ * using the device.
+ */
+ r = amdgpu_doorbell_init(adev);
+ if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
+ return -ENODEV;
+
+ pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);
+
+ return 0;
}
/*
* GPU helpers function.
*/
/**
- * amdgpu_need_post - check if the hw need post or not
+ * amdgpu_device_need_post - check if the hw need post or not
*
* @adev: amdgpu_device pointer
*
@@ -706,27 +1769,17 @@ void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc)
* or post is needed if hw reset is performed.
* Returns true if need or false if not.
*/
-bool amdgpu_need_post(struct amdgpu_device *adev)
+bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
- uint32_t reg;
+ uint32_t reg, flags;
- if (adev->has_hw_reset) {
- adev->has_hw_reset = false;
- return true;
- }
- /* then check MEM_SIZE, in case the crtcs are off */
- reg = amdgpu_asic_get_config_memsize(adev);
-
- if ((reg != 0) && (reg != 0xffffffff))
+ if (amdgpu_sriov_vf(adev))
return false;
- return true;
-
-}
-
-static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
-{
- if (amdgpu_sriov_vf(adev))
+ flags = amdgpu_device_get_vbios_flags(adev);
+ if (flags & AMDGPU_VBIOS_SKIP)
+ return false;
+ if ((flags & AMDGPU_VBIOS_OPTIONAL) && !adev->bios)
return false;
if (amdgpu_passthrough(adev)) {
@@ -738,285 +1791,168 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_FIJI) {
int err;
uint32_t fw_ver;
+
err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
- /* force vPost if error occured */
+ /* force vPost if error occurred */
if (err)
return true;
fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
+ release_firmware(adev->pm.fw);
if (fw_ver < 0x00160e00)
return true;
}
}
- return amdgpu_need_post(adev);
-}
-
-/**
- * amdgpu_dummy_page_init - init dummy page used by the driver
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate the dummy page used by the driver (all asics).
- * This dummy page is used by the driver as a filler for gart entries
- * when pages are taken out of the GART
- * Returns 0 on sucess, -ENOMEM on failure.
- */
-int amdgpu_dummy_page_init(struct amdgpu_device *adev)
-{
- if (adev->dummy_page.page)
- return 0;
- adev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
- if (adev->dummy_page.page == NULL)
- return -ENOMEM;
- adev->dummy_page.addr = pci_map_page(adev->pdev, adev->dummy_page.page,
- 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(adev->pdev, adev->dummy_page.addr)) {
- dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
- __free_page(adev->dummy_page.page);
- adev->dummy_page.page = NULL;
- return -ENOMEM;
- }
- return 0;
-}
-
-/**
- * amdgpu_dummy_page_fini - free dummy page used by the driver
- *
- * @adev: amdgpu_device pointer
- *
- * Frees the dummy page used by the driver (all asics).
- */
-void amdgpu_dummy_page_fini(struct amdgpu_device *adev)
-{
- if (adev->dummy_page.page == NULL)
- return;
- pci_unmap_page(adev->pdev, adev->dummy_page.addr,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- __free_page(adev->dummy_page.page);
- adev->dummy_page.page = NULL;
-}
+ /* Don't post if we need to reset whole hive on init */
+ if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
+ return false;
-/* ATOM accessor methods */
-/*
- * ATOM is an interpreted byte code stored in tables in the vbios. The
- * driver registers callbacks to access registers and the interpreter
- * in the driver parses the tables and executes then to program specific
- * actions (set display modes, asic init, etc.). See amdgpu_atombios.c,
- * atombios.h, and atom.c
- */
+ if (adev->has_hw_reset) {
+ adev->has_hw_reset = false;
+ return true;
+ }
-/**
- * cail_pll_read - read PLL register
- *
- * @info: atom card_info pointer
- * @reg: PLL register offset
- *
- * Provides a PLL register accessor for the atom interpreter (r4xx+).
- * Returns the value of the PLL register.
- */
-static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
-{
- return 0;
-}
+ /* bios scratch used on CIK+ */
+ if (adev->asic_type >= CHIP_BONAIRE)
+ return amdgpu_atombios_scratch_need_asic_init(adev);
-/**
- * cail_pll_write - write PLL register
- *
- * @info: atom card_info pointer
- * @reg: PLL register offset
- * @val: value to write to the pll register
- *
- * Provides a PLL register accessor for the atom interpreter (r4xx+).
- */
-static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
+ /* check MEM_SIZE for older asics */
+ reg = amdgpu_asic_get_config_memsize(adev);
-}
+ if ((reg != 0) && (reg != 0xffffffff))
+ return false;
-/**
- * cail_mc_read - read MC (Memory Controller) register
- *
- * @info: atom card_info pointer
- * @reg: MC register offset
- *
- * Provides an MC register accessor for the atom interpreter (r4xx+).
- * Returns the value of the MC register.
- */
-static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
-{
- return 0;
+ return true;
}
-/**
- * cail_mc_write - write MC (Memory Controller) register
- *
- * @info: atom card_info pointer
- * @reg: MC register offset
- * @val: value to write to the pll register
+/*
+ * Check whether seamless boot is supported.
*
- * Provides a MC register accessor for the atom interpreter (r4xx+).
+ * So far we only support seamless boot on DCE 3.0 or later.
+ * If users report that it works on older ASICS as well, we may
+ * loosen this.
*/
-static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
+bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
{
+ switch (amdgpu_seamless) {
+ case -1:
+ break;
+ case 1:
+ return true;
+ case 0:
+ return false;
+ default:
+ dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n",
+ amdgpu_seamless);
+ return false;
+ }
-}
+ if (!(adev->flags & AMD_IS_APU))
+ return false;
-/**
- * cail_reg_write - write MMIO register
- *
- * @info: atom card_info pointer
- * @reg: MMIO register offset
- * @val: value to write to the pll register
- *
- * Provides a MMIO register accessor for the atom interpreter (r4xx+).
- */
-static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
+ if (adev->mman.keep_stolen_vga_memory)
+ return false;
- WREG32(reg, val);
+ return amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0);
}
-/**
- * cail_reg_read - read MMIO register
- *
- * @info: atom card_info pointer
- * @reg: MMIO register offset
+/*
+ * Intel hosts such as Rocket Lake, Alder Lake, Raptor Lake and Sapphire Rapids
+ * don't support dynamic speed switching. Until we have confirmation from Intel
+ * that a specific host supports it, it is safer to keep it disabled for all.
*
- * Provides an MMIO register accessor for the atom interpreter (r4xx+).
- * Returns the value of the MMIO register.
+ * https://edc.intel.com/content/www/us/en/design/products/platforms/details/raptor-lake-s/13th-generation-core-processors-datasheet-volume-1-of-2/005/pci-express-support/
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2663
*/
-static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
+static bool amdgpu_device_pcie_dynamic_switching_supported(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = info->dev->dev_private;
- uint32_t r;
-
- r = RREG32(reg);
- return r;
-}
+#if IS_ENABLED(CONFIG_X86)
+ struct cpuinfo_x86 *c = &cpu_data(0);
-/**
- * cail_ioreg_write - write IO register
- *
- * @info: atom card_info pointer
- * @reg: IO register offset
- * @val: value to write to the pll register
- *
- * Provides a IO register accessor for the atom interpreter (r4xx+).
- */
-static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
- struct amdgpu_device *adev = info->dev->dev_private;
+ /* eGPU change speeds based on USB4 fabric conditions */
+ if (dev_is_removable(adev->dev))
+ return true;
- WREG32_IO(reg, val);
+ if (c->x86_vendor == X86_VENDOR_INTEL)
+ return false;
+#endif
+ return true;
}
-/**
- * cail_ioreg_read - read IO register
- *
- * @info: atom card_info pointer
- * @reg: IO register offset
- *
- * Provides an IO register accessor for the atom interpreter (r4xx+).
- * Returns the value of the IO register.
- */
-static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
+static bool amdgpu_device_aspm_support_quirk(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = info->dev->dev_private;
- uint32_t r;
+#if IS_ENABLED(CONFIG_X86)
+ struct cpuinfo_x86 *c = &cpu_data(0);
- r = RREG32_IO(reg);
- return r;
-}
+ if (!(amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 0) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(12, 0, 1)))
+ return false;
-/**
- * amdgpu_atombios_fini - free the driver info and callbacks for atombios
- *
- * @adev: amdgpu_device pointer
- *
- * Frees the driver info and register access callbacks for the ATOM
- * interpreter (r4xx+).
- * Called at driver shutdown.
- */
-static void amdgpu_atombios_fini(struct amdgpu_device *adev)
-{
- if (adev->mode_info.atom_context) {
- kfree(adev->mode_info.atom_context->scratch);
- kfree(adev->mode_info.atom_context->iio);
+ if (c->x86 == 6 &&
+ adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) {
+ switch (c->x86_model) {
+ case VFM_MODEL(INTEL_ALDERLAKE):
+ case VFM_MODEL(INTEL_ALDERLAKE_L):
+ case VFM_MODEL(INTEL_RAPTORLAKE):
+ case VFM_MODEL(INTEL_RAPTORLAKE_P):
+ case VFM_MODEL(INTEL_RAPTORLAKE_S):
+ return true;
+ default:
+ return false;
+ }
+ } else {
+ return false;
}
- kfree(adev->mode_info.atom_context);
- adev->mode_info.atom_context = NULL;
- kfree(adev->mode_info.atom_card_info);
- adev->mode_info.atom_card_info = NULL;
+#else
+ return false;
+#endif
}
/**
- * amdgpu_atombios_init - init the driver info and callbacks for atombios
+ * amdgpu_device_should_use_aspm - check if the device should program ASPM
*
* @adev: amdgpu_device pointer
*
- * Initializes the driver info and register access callbacks for the
- * ATOM interpreter (r4xx+).
- * Returns 0 on sucess, -ENOMEM on failure.
- * Called at driver startup.
+ * Confirm whether the module parameter and pcie bridge agree that ASPM should
+ * be set for this device.
+ *
+ * Returns true if it should be used or false if not.
*/
-static int amdgpu_atombios_init(struct amdgpu_device *adev)
+bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
{
- struct card_info *atom_card_info =
- kzalloc(sizeof(struct card_info), GFP_KERNEL);
-
- if (!atom_card_info)
- return -ENOMEM;
-
- adev->mode_info.atom_card_info = atom_card_info;
- atom_card_info->dev = adev->ddev;
- atom_card_info->reg_read = cail_reg_read;
- atom_card_info->reg_write = cail_reg_write;
- /* needed for iio ops */
- if (adev->rio_mem) {
- atom_card_info->ioreg_read = cail_ioreg_read;
- atom_card_info->ioreg_write = cail_ioreg_write;
- } else {
- DRM_INFO("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
- atom_card_info->ioreg_read = cail_reg_read;
- atom_card_info->ioreg_write = cail_reg_write;
- }
- atom_card_info->mc_read = cail_mc_read;
- atom_card_info->mc_write = cail_mc_write;
- atom_card_info->pll_read = cail_pll_read;
- atom_card_info->pll_write = cail_pll_write;
-
- adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
- if (!adev->mode_info.atom_context) {
- amdgpu_atombios_fini(adev);
- return -ENOMEM;
- }
-
- mutex_init(&adev->mode_info.atom_context->mutex);
- if (adev->is_atom_fw) {
- amdgpu_atomfirmware_scratch_regs_init(adev);
- amdgpu_atomfirmware_allocate_fb_scratch(adev);
- } else {
- amdgpu_atombios_scratch_regs_init(adev);
- amdgpu_atombios_allocate_fb_scratch(adev);
+ switch (amdgpu_aspm) {
+ case -1:
+ break;
+ case 0:
+ return false;
+ case 1:
+ return true;
+ default:
+ return false;
}
- return 0;
+ if (adev->flags & AMD_IS_APU)
+ return false;
+ if (amdgpu_device_aspm_support_quirk(adev))
+ return false;
+ return pcie_aspm_enabled(adev->pdev);
}
/* if we get transitioned to only one device, take VGA back */
/**
- * amdgpu_vga_set_decode - enable/disable vga decode
+ * amdgpu_device_vga_set_decode - enable/disable vga decode
*
- * @cookie: amdgpu_device pointer
+ * @pdev: PCI device pointer
* @state: enable/disable vga decode
*
* Enable/disable vga decode (all asics).
* Returns VGA resource flags.
*/
-static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
+static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
+ bool state)
{
- struct amdgpu_device *adev = cookie;
+ struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
+
amdgpu_asic_set_vga_state(adev, state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
@@ -1026,113 +1962,222 @@ static unsigned int amdgpu_vga_set_decode(void *cookie, bool state)
}
/**
- * amdgpu_check_pot_argument - check that argument is a power of two
+ * amdgpu_device_check_block_size - validate the vm block size
*
- * @arg: value to check
+ * @adev: amdgpu_device pointer
*
- * Validates that a certain argument is a power of two (all asics).
- * Returns true if argument is valid.
+ * Validates the vm block size specified via module parameter.
+ * The vm block size defines number of bits in page table versus page directory,
+ * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
+ * page table and the remaining bits are in the page directory.
*/
-static bool amdgpu_check_pot_argument(int arg)
-{
- return (arg & (arg - 1)) == 0;
-}
-
-static void amdgpu_check_block_size(struct amdgpu_device *adev)
+static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
- * page table and the remaining bits are in the page directory */
+ * page table and the remaining bits are in the page directory
+ */
if (amdgpu_vm_block_size == -1)
return;
if (amdgpu_vm_block_size < 9) {
dev_warn(adev->dev, "VM page table size (%d) too small\n",
amdgpu_vm_block_size);
- goto def_value;
- }
-
- if (amdgpu_vm_block_size > 24 ||
- (amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
- dev_warn(adev->dev, "VM page table size (%d) too large\n",
- amdgpu_vm_block_size);
- goto def_value;
+ amdgpu_vm_block_size = -1;
}
-
- return;
-
-def_value:
- amdgpu_vm_block_size = -1;
}
-static void amdgpu_check_vm_size(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_check_vm_size - validate the vm size
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Validates the vm size in GB specified via module parameter.
+ * The VM size is the size of the GPU virtual memory space in GB.
+ */
+static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
- if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
- dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
- amdgpu_vm_size);
- goto def_value;
- }
+ /* no need to check the default value */
+ if (amdgpu_vm_size == -1)
+ return;
if (amdgpu_vm_size < 1) {
dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
amdgpu_vm_size);
- goto def_value;
+ amdgpu_vm_size = -1;
}
+}
- /*
- * Max GPUVM size for Cayman, SI, CI VI are 40 bits.
- */
- if (amdgpu_vm_size > 1024) {
- dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
- amdgpu_vm_size);
+static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
+{
+ struct sysinfo si;
+ bool is_os_64 = (sizeof(void *) == 8);
+ uint64_t total_memory;
+ uint64_t dram_size_seven_GB = 0x1B8000000;
+ uint64_t dram_size_three_GB = 0xB8000000;
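+ /* thresholds sit 128MB below the nominal 3GB/7GB, presumably to allow for reserved memory */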
+
+ if (amdgpu_smu_memory_pool_size == 0)
+ return;
+
+ if (!is_os_64) {
+ dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n");
goto def_value;
}
+ si_meminfo(&si);
+ total_memory = (uint64_t)si.totalram * si.mem_unit;
+
+ if ((amdgpu_smu_memory_pool_size == 1) ||
+ (amdgpu_smu_memory_pool_size == 2)) {
+ if (total_memory < dram_size_three_GB)
+ goto def_value1;
+ } else if ((amdgpu_smu_memory_pool_size == 4) ||
+ (amdgpu_smu_memory_pool_size == 8)) {
+ if (total_memory < dram_size_seven_GB)
+ goto def_value1;
+ } else {
+ dev_warn(adev->dev, "Smu memory pool size not supported\n");
+ goto def_value;
+ }
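+ /* amdgpu_smu_memory_pool_size is in units of 256MB (1 << 28 bytes) */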
+ adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
return;
+def_value1:
+ dev_warn(adev->dev, "No enough system memory\n");
def_value:
- amdgpu_vm_size = -1;
+ adev->pm.smu_prv_buffer_size = 0;
+}
+
+static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
+{
+ if (!(adev->flags & AMD_IS_APU) ||
+ adev->asic_type < CHIP_RAVEN)
+ return 0;
+
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ if (adev->pdev->device == 0x15dd)
+ adev->apu_flags |= AMD_APU_IS_RAVEN;
+ if (adev->pdev->device == 0x15d8)
+ adev->apu_flags |= AMD_APU_IS_PICASSO;
+ break;
+ case CHIP_RENOIR:
+ if ((adev->pdev->device == 0x1636) ||
+ (adev->pdev->device == 0x164c))
+ adev->apu_flags |= AMD_APU_IS_RENOIR;
+ else
+ adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
+ break;
+ case CHIP_VANGOGH:
+ adev->apu_flags |= AMD_APU_IS_VANGOGH;
+ break;
+ case CHIP_YELLOW_CARP:
+ break;
+ case CHIP_CYAN_SKILLFISH:
+ if ((adev->pdev->device == 0x13FE) ||
+ (adev->pdev->device == 0x143F))
+ adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
}
/**
- * amdgpu_check_arguments - validate module params
+ * amdgpu_device_check_arguments - validate module params
*
* @adev: amdgpu_device pointer
*
* Validates certain module parameters and updates
* the associated values used by the driver (all asics).
*/
-static void amdgpu_check_arguments(struct amdgpu_device *adev)
+static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
+ int i;
+
if (amdgpu_sched_jobs < 4) {
dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
amdgpu_sched_jobs);
amdgpu_sched_jobs = 4;
- } else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)){
+ } else if (!is_power_of_2(amdgpu_sched_jobs)) {
dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
amdgpu_sched_jobs);
amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
}
- if (amdgpu_gart_size != -1) {
+ if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
+ /* gart size must be greater or equal to 32M */
+ dev_warn(adev->dev, "gart size (%d) too small\n",
+ amdgpu_gart_size);
+ amdgpu_gart_size = -1;
+ }
+
+ if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
/* gtt size must be greater or equal to 32M */
- if (amdgpu_gart_size < 32) {
- dev_warn(adev->dev, "gart size (%d) too small\n",
- amdgpu_gart_size);
- amdgpu_gart_size = -1;
- }
+ dev_warn(adev->dev, "gtt size (%d) too small\n",
+ amdgpu_gtt_size);
+ amdgpu_gtt_size = -1;
}
- amdgpu_check_vm_size(adev);
+ /* valid range is between 4 and 9 inclusive */
+ if (amdgpu_vm_fragment_size != -1 &&
+ (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
+ dev_warn(adev->dev, "valid range is between 4 and 9\n");
+ amdgpu_vm_fragment_size = -1;
+ }
+
+ if (amdgpu_sched_hw_submission < 2) {
+ dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
+ amdgpu_sched_hw_submission);
+ amdgpu_sched_hw_submission = 2;
+ } else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
+ dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
+ amdgpu_sched_hw_submission);
+ amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
+ }
+
+ if (amdgpu_reset_method < -1 || amdgpu_reset_method > 4) {
+ dev_warn(adev->dev, "invalid option for reset method, reverting to default\n");
+ amdgpu_reset_method = -1;
+ }
+
+ amdgpu_device_check_smu_prv_buffer_size(adev);
+
+ amdgpu_device_check_vm_size(adev);
+
+ amdgpu_device_check_block_size(adev);
- amdgpu_check_block_size(adev);
+ adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
- if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
- !amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
- dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
- amdgpu_vram_page_split);
- amdgpu_vram_page_split = 1024;
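+ /* apply the same isolation setting to every partition (XCP) */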
+ for (i = 0; i < MAX_XCP; i++) {
+ switch (amdgpu_enforce_isolation) {
+ case -1:
+ case 0:
+ default:
+ /* disable */
+ adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
+ break;
+ case 1:
+ /* enable */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE;
+ break;
+ case 2:
+ /* enable legacy mode */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
+ break;
+ case 3:
+ /* enable only process isolation without submitting cleaner shader */
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
+ break;
+ }
}
+
+ return 0;
}
/**
@@ -1141,34 +2186,42 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
* @pdev: pci dev pointer
* @state: vga_switcheroo state
*
- * Callback for the switcheroo driver. Suspends or resumes the
+ * Callback for the switcheroo driver. Suspends or resumes
* the asics before or after it is powered up using ACPI methods.
*/
-static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
+ enum vga_switcheroo_state state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ int r;
- if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
+ if (amdgpu_device_supports_px(drm_to_adev(dev)) &&
+ state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
- unsigned d3_delay = dev->pdev->d3_delay;
-
- pr_info("amdgpu: switched on\n");
+ pr_info("switched on\n");
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- amdgpu_device_resume(dev, true, true);
-
- dev->pdev->d3_delay = d3_delay;
+ pci_set_power_state(pdev, PCI_D0);
+ amdgpu_device_load_pci_state(pdev);
+ r = pci_enable_device(pdev);
+ if (r)
+ dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n",
+ r);
+ amdgpu_device_resume(dev, true);
dev->switch_power_state = DRM_SWITCH_POWER_ON;
- drm_kms_helper_poll_enable(dev);
} else {
- pr_info("amdgpu: switched off\n");
- drm_kms_helper_poll_disable(dev);
+ dev_info(&pdev->dev, "switched off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- amdgpu_device_suspend(dev, true, true);
+ amdgpu_device_prepare(dev);
+ amdgpu_device_suspend(dev, true);
+ amdgpu_device_cache_pci_state(pdev);
+ /* Shut down the device */
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
}
@@ -1186,12 +2239,12 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- /*
+ /*
* FIXME: open_count is protected by drm_global_mutex but that would lead to
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return dev->open_count == 0;
+ return atomic_read(&dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
@@ -1200,10 +2253,22 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
.can_switch = amdgpu_switcheroo_can_switch,
};
-int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_clockgating_state state)
+/**
+ * amdgpu_device_ip_set_clockgating_state - set the CG state
+ *
+ * @dev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ * @state: clockgating state (gate or ungate)
+ *
+ * Sets the requested clockgating state for all instances of
+ * the hardware IP specified.
+ * Returns the error code from the last instance.
+ */
+int amdgpu_device_ip_set_clockgating_state(void *dev,
+ enum amd_ip_block_type block_type,
+ enum amd_clockgating_state state)
{
+ struct amdgpu_device *adev = dev;
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1214,18 +2279,31 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
continue;
r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
- (void *)adev, state);
+ &adev->ip_blocks[i], state);
if (r)
- DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_clockgating_state of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
-int amdgpu_set_powergating_state(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type,
- enum amd_powergating_state state)
+/**
+ * amdgpu_device_ip_set_powergating_state - set the PG state
+ *
+ * @dev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ * @state: powergating state (gate or ungate)
+ *
+ * Sets the requested powergating state for all instances of
+ * the hardware IP specified.
+ * Returns the error code from the last instance.
+ */
+int amdgpu_device_ip_set_powergating_state(void *dev,
+ enum amd_ip_block_type block_type,
+ enum amd_powergating_state state)
{
+ struct amdgpu_device *adev = dev;
int i, r = 0;
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1236,15 +2314,28 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
continue;
r = adev->ip_blocks[i].version->funcs->set_powergating_state(
- (void *)adev, state);
+ &adev->ip_blocks[i], state);
if (r)
- DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_powergating_state of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
}
return r;
}
-void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
+/**
+ * amdgpu_device_ip_get_clockgating_state - get the CG state
+ *
+ * @adev: amdgpu_device pointer
+ * @flags: clockgating feature flags
+ *
+ * Walks the list of IPs on the device and updates the clockgating
+ * flags for each IP.
+ * Updates @flags with the feature flags for each hardware IP where
+ * clockgating is enabled.
+ */
+void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
+ u64 *flags)
{
int i;
@@ -1252,12 +2343,22 @@ void amdgpu_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
- adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
+ adev->ip_blocks[i].version->funcs->get_clockgating_state(
+ &adev->ip_blocks[i], flags);
}
}
-int amdgpu_wait_for_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type)
+/**
+ * amdgpu_device_ip_wait_for_idle - wait for idle
+ *
+ * @adev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ *
+ * Waits for the requested hardware IP to be idle.
+ * Returns 0 for success or a negative error code on failure.
+ */
+int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
{
int i, r;
@@ -1265,9 +2366,12 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->type == block_type) {
- r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
- if (r)
- return r;
+ if (adev->ip_blocks[i].version->funcs->wait_for_idle) {
+ r = adev->ip_blocks[i].version->funcs->wait_for_idle(
+ &adev->ip_blocks[i]);
+ if (r)
+ return r;
+ }
break;
}
}
@@ -1275,23 +2379,40 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev,
}
-bool amdgpu_is_idle(struct amdgpu_device *adev,
- enum amd_ip_block_type block_type)
+/**
+ * amdgpu_device_ip_is_valid - is the hardware IP enabled
+ *
+ * @adev: amdgpu_device pointer
+ * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ *
+ * Check if the hardware IP is enabled or not.
+ * Returns true if the IP is enabled, false if not.
+ */
+bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev,
+ enum amd_ip_block_type block_type)
{
int i;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
- continue;
if (adev->ip_blocks[i].version->type == block_type)
- return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
+ return adev->ip_blocks[i].status.valid;
}
- return true;
+ return false;
}
-struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
- enum amd_ip_block_type type)
+/**
+ * amdgpu_device_ip_get_ip_block - get a hw IP pointer
+ *
+ * @adev: amdgpu_device pointer
+ * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
+ *
+ * Returns a pointer to the hardware IP block structure
+ * if it exists for the asic, otherwise NULL.
+ */
+struct amdgpu_ip_block *
+amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
+ enum amd_ip_block_type type)
{
int i;
@@ -1303,7 +2424,7 @@ struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
}
/**
- * amdgpu_ip_block_version_cmp
+ * amdgpu_device_ip_block_version_cmp
*
* @adev: amdgpu_device pointer
* @type: enum amd_ip_block_type
@@ -1313,11 +2434,11 @@ struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev,
* return 0 if equal or greater
* return 1 if smaller or the ip_block doesn't exist
*/
-int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
- enum amd_ip_block_type type,
- u32 major, u32 minor)
+int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
+ enum amd_ip_block_type type,
+ u32 major, u32 minor)
{
- struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type);
+ struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);
if (ip_block && ((ip_block->version->major > major) ||
((ip_block->version->major == major) &&
@@ -1327,8 +2448,35 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
return 1;
}
+static const char *ip_block_names[] = {
+ [AMD_IP_BLOCK_TYPE_COMMON] = "common",
+ [AMD_IP_BLOCK_TYPE_GMC] = "gmc",
+ [AMD_IP_BLOCK_TYPE_IH] = "ih",
+ [AMD_IP_BLOCK_TYPE_SMC] = "smu",
+ [AMD_IP_BLOCK_TYPE_PSP] = "psp",
+ [AMD_IP_BLOCK_TYPE_DCE] = "dce",
+ [AMD_IP_BLOCK_TYPE_GFX] = "gfx",
+ [AMD_IP_BLOCK_TYPE_SDMA] = "sdma",
+ [AMD_IP_BLOCK_TYPE_UVD] = "uvd",
+ [AMD_IP_BLOCK_TYPE_VCE] = "vce",
+ [AMD_IP_BLOCK_TYPE_ACP] = "acp",
+ [AMD_IP_BLOCK_TYPE_VCN] = "vcn",
+ [AMD_IP_BLOCK_TYPE_MES] = "mes",
+ [AMD_IP_BLOCK_TYPE_JPEG] = "jpeg",
+ [AMD_IP_BLOCK_TYPE_VPE] = "vpe",
+ [AMD_IP_BLOCK_TYPE_UMSCH_MM] = "umsch_mm",
+ [AMD_IP_BLOCK_TYPE_ISP] = "isp",
+};
+
+static const char *ip_block_name(struct amdgpu_device *adev, enum amd_ip_block_type type)
+{
+ int idx = (int)type;
+
+ return idx < ARRAY_SIZE(ip_block_names) ? ip_block_names[idx] : "unknown";
+}
+
/**
- * amdgpu_ip_block_add
+ * amdgpu_device_ip_block_add
*
* @adev: amdgpu_device pointer
* @ip_block_version: pointer to the IP to add
@@ -1336,24 +2484,58 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
* Adds the IP block driver information to the collection of IPs
* on the asic.
*/
-int amdgpu_ip_block_add(struct amdgpu_device *adev,
- const struct amdgpu_ip_block_version *ip_block_version)
+int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
+ const struct amdgpu_ip_block_version *ip_block_version)
{
if (!ip_block_version)
return -EINVAL;
+ switch (ip_block_version->type) {
+ case AMD_IP_BLOCK_TYPE_VCN:
+ if (adev->harvest_ip_mask & AMD_HARVEST_IP_VCN_MASK)
+ return 0;
+ break;
+ case AMD_IP_BLOCK_TYPE_JPEG:
+ if (adev->harvest_ip_mask & AMD_HARVEST_IP_JPEG_MASK)
+ return 0;
+ break;
+ default:
+ break;
+ }
+
+ dev_info(adev->dev, "detected ip block number %d <%s_v%d_%d_%d> (%s)\n",
+ adev->num_ip_blocks,
+ ip_block_name(adev, ip_block_version->type),
+ ip_block_version->major,
+ ip_block_version->minor,
+ ip_block_version->rev,
+ ip_block_version->funcs->name);
+
+ adev->ip_blocks[adev->num_ip_blocks].adev = adev;
+
adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;
return 0;
}
+/**
+ * amdgpu_device_enable_virtual_display - enable virtual display feature
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Enables the virtual display feature if the user has enabled it via
+ * the module parameter virtual_display. This feature provides virtual
+ * display hardware on headless boards or in virtualized environments.
+ * This function parses and validates the configuration string specified by
+ * the user and configures the virtual display configuration (number of
+ * virtual connectors, crtcs, etc.) specified.
+ */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
adev->enable_virtual_display = false;
if (amdgpu_virtual_display) {
- struct drm_device *ddev = adev->ddev;
- const char *pci_address_name = pci_name(ddev->pdev);
+ const char *pci_address_name = pci_name(adev->pdev);
char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;
pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
@@ -1384,38 +2566,197 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
}
}
- DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
- amdgpu_virtual_display, pci_address_name,
- adev->enable_virtual_display, adev->mode_info.num_crtc);
+ dev_info(
+ adev->dev,
+ "virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+ amdgpu_virtual_display, pci_address_name,
+ adev->enable_virtual_display, adev->mode_info.num_crtc);
kfree(pciaddstr);
}
}
-static int amdgpu_early_init(struct amdgpu_device *adev)
+void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
{
- int i, r;
+ if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
+ adev->mode_info.num_crtc = 1;
+ adev->enable_virtual_display = true;
+ dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n",
+ adev->enable_virtual_display,
+ adev->mode_info.num_crtc);
+ }
+}
- amdgpu_device_enable_virtual_display(adev);
+/**
+ * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Parses the asic configuration parameters specified in the gpu info
+ * firmware and makes them available to the driver for use in configuring
+ * the asic.
+ * Returns 0 on success, -EINVAL on failure.
+ */
+static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
+{
+ const char *chip_name;
+ int err;
+ const struct gpu_info_firmware_header_v1_0 *hdr;
+
+ adev->firmware.gpu_info_fw = NULL;
switch (adev->asic_type) {
- case CHIP_TOPAZ:
- case CHIP_TONGA:
- case CHIP_FIJI:
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- case CHIP_POLARIS12:
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
- adev->family = AMDGPU_FAMILY_CZ;
+ default:
+ return 0;
+ case CHIP_VEGA10:
+ chip_name = "vega10";
+ break;
+ case CHIP_VEGA12:
+ chip_name = "vega12";
+ break;
+ case CHIP_RAVEN:
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ chip_name = "raven2";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ chip_name = "picasso";
else
- adev->family = AMDGPU_FAMILY_VI;
+ chip_name = "raven";
+ break;
+ case CHIP_ARCTURUS:
+ chip_name = "arcturus";
+ break;
+ case CHIP_NAVI12:
+ if (adev->mman.discovery_bin)
+ return 0;
+ chip_name = "navi12";
+ break;
+ case CHIP_CYAN_SKILLFISH:
+ chip_name = "cyan_skillfish";
+ break;
+ }
- r = vi_set_ip_blocks(adev);
+ err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw,
+ AMDGPU_UCODE_OPTIONAL,
+ "amdgpu/%s_gpu_info.bin", chip_name);
+ if (err) {
+ dev_err(adev->dev,
+ "Failed to get gpu_info firmware \"%s_gpu_info.bin\"\n",
+ chip_name);
+ goto out;
+ }
+
+ hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
+ amdgpu_ucode_print_gpu_info_hdr(&hdr->header);
+
+ switch (hdr->version_major) {
+ case 1:
+ {
+ const struct gpu_info_firmware_v1_0 *gpu_info_fw =
+ (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+
+ /*
+ * Should be dropped when DAL no longer needs it.
+ */
+ if (adev->asic_type == CHIP_NAVI12)
+ goto parse_soc_bounding_box;
+
+ adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
+ adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
+ adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
+ adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
+ adev->gfx.config.max_texture_channel_caches =
+ le32_to_cpu(gpu_info_fw->gc_num_tccs);
+ adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
+ adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
+ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
+ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf =
+ le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
+ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd =
+ le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu =
+ le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
+ adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
+ if (hdr->version_minor >= 1) {
+ const struct gpu_info_firmware_v1_1 *gpu_info_fw =
+ (const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ adev->gfx.config.num_sc_per_sh =
+ le32_to_cpu(gpu_info_fw->num_sc_per_sh);
+ adev->gfx.config.num_packer_per_sc =
+ le32_to_cpu(gpu_info_fw->num_packer_per_sc);
+ }
+
+parse_soc_bounding_box:
+ /*
+ * soc bounding box info is not integrated in the discovery table,
+ * so it always needs to be parsed from the gpu info firmware when needed.
+ */
+ if (hdr->version_minor == 2) {
+ const struct gpu_info_firmware_v1_2 *gpu_info_fw =
+ (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
+ }
+ break;
+ }
+ default:
+ dev_err(adev->dev,
+ "Unsupported gpu_info table %d\n", hdr->header.ucode_version);
+ err = -EINVAL;
+ goto out;
+ }
+out:
+ return err;
+}
+
+static void amdgpu_uid_init(struct amdgpu_device *adev)
+{
+ /* Initialize the UID for the device */
+ adev->uid_info = kzalloc(sizeof(struct amdgpu_uid), GFP_KERNEL);
+ if (!adev->uid_info) {
+ dev_warn(adev->dev, "Failed to allocate memory for UID\n");
+ return;
+ }
+ adev->uid_info->adev = adev;
+}
+
+static void amdgpu_uid_fini(struct amdgpu_device *adev)
+{
+ /* Free the UID memory */
+ kfree(adev->uid_info);
+ adev->uid_info = NULL;
+}
+
+/**
+ * amdgpu_device_ip_early_init - run early init for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Early initialization pass for hardware IPs. The hardware IPs that make
+ * up each asic are discovered each IP's early_init callback is run. This
+ * is the first stage in initializing the asic.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ip_block *ip_block;
+ struct pci_dev *parent;
+ bool total, skip_bios;
+ uint32_t bios_flags;
+ int i, r;
+
+ amdgpu_device_enable_virtual_display(adev);
+
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
return r;
- break;
+ }
+
+ switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
case CHIP_VERDE:
case CHIP_TAHITI:
@@ -1434,55 +2775,149 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
- if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
- adev->family = AMDGPU_FAMILY_CI;
- else
+ if (adev->flags & AMD_IS_APU)
adev->family = AMDGPU_FAMILY_KV;
+ else
+ adev->family = AMDGPU_FAMILY_CI;
r = cik_set_ip_blocks(adev);
if (r)
return r;
break;
#endif
- case CHIP_VEGA10:
- adev->family = AMDGPU_FAMILY_AI;
+ case CHIP_TOPAZ:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ if (adev->flags & AMD_IS_APU)
+ adev->family = AMDGPU_FAMILY_CZ;
+ else
+ adev->family = AMDGPU_FAMILY_VI;
- r = soc15_set_ip_blocks(adev);
+ r = vi_set_ip_blocks(adev);
if (r)
return r;
break;
default:
- /* FIXME: not supported yet */
- return -EINVAL;
+ r = amdgpu_discovery_set_ip_blocks(adev);
+ if (r)
+ return r;
+ break;
}
+ /* Check for IP version 9.4.3 with A0 hardware */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+ !amdgpu_device_get_rev_id(adev)) {
+ dev_err(adev->dev, "Unsupported A0 hardware\n");
+ return -ENODEV; /* device unsupported - no device error */
+ }
+
+ if (amdgpu_has_atpx() &&
+ (amdgpu_is_atpx_hybrid() ||
+ amdgpu_has_atpx_dgpu_power_cntl()) &&
+ ((adev->flags & AMD_IS_APU) == 0) &&
+ !dev_is_removable(&adev->pdev->dev))
+ adev->flags |= AMD_IS_PX;
+
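+ /* _PR3 on the root port indicates the platform can power the dGPU down to D3cold */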
+ if (!(adev->flags & AMD_IS_APU)) {
+ parent = pcie_find_root_port(adev->pdev);
+ adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
+ }
+
+ adev->pm.pp_feature = amdgpu_pp_feature_mask;
+ if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
+ adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
+ if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
+ adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
+
+ adev->virt.is_xgmi_node_migrate_enabled = false;
if (amdgpu_sriov_vf(adev)) {
- r = amdgpu_virt_request_full_gpu(adev, true);
- if (r)
- return r;
+ adev->virt.is_xgmi_node_migrate_enabled =
+ amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4);
}
+ total = true;
for (i = 0; i < adev->num_ip_blocks; i++) {
+ ip_block = &adev->ip_blocks[i];
+
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
- DRM_ERROR("disabled ip block: %d\n", i);
+ dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i,
+ adev->ip_blocks[i].version->funcs->name);
adev->ip_blocks[i].status.valid = false;
- } else {
- if (adev->ip_blocks[i].version->funcs->early_init) {
- r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
- if (r == -ENOENT) {
- adev->ip_blocks[i].status.valid = false;
- } else if (r) {
- DRM_ERROR("early_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- return r;
- } else {
- adev->ip_blocks[i].status.valid = true;
- }
+ } else if (ip_block->version->funcs->early_init) {
+ r = ip_block->version->funcs->early_init(ip_block);
+ if (r == -ENOENT) {
+ adev->ip_blocks[i].status.valid = false;
+ } else if (r) {
+ dev_err(adev->dev,
+ "early_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
+ total = false;
} else {
adev->ip_blocks[i].status.valid = true;
}
+ } else {
+ adev->ip_blocks[i].status.valid = true;
+ }
+ /* get the vbios after the asic_funcs are set up */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+ r = amdgpu_device_parse_gpu_info_fw(adev);
+ if (r)
+ return r;
+
+ bios_flags = amdgpu_device_get_vbios_flags(adev);
+ skip_bios = !!(bios_flags & AMDGPU_VBIOS_SKIP);
+ /* Read BIOS */
+ if (!skip_bios) {
+ bool optional =
+ !!(bios_flags & AMDGPU_VBIOS_OPTIONAL);
+ if (!amdgpu_get_bios(adev) && !optional)
+ return -EINVAL;
+
+ if (optional && !adev->bios)
+ dev_info(
+ adev->dev,
+ "VBIOS image optional, proceeding without VBIOS image");
+
+ if (adev->bios) {
+ r = amdgpu_atombios_init(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "amdgpu_atombios_init failed\n");
+ amdgpu_vf_error_put(
+ adev,
+ AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL,
+ 0, 0);
+ return r;
+ }
+ }
+ }
+
+ /* get pf2vf msg info at its earliest time */
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_init_data_exchange(adev);
+
}
}
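+ /* abort init if any enabled IP block failed its early_init */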
+ if (!total)
+ return -ENODEV;
+
+ if (adev->gmc.xgmi.supported)
+ amdgpu_xgmi_early_init(adev);
+
+ if (amdgpu_is_multi_aid(adev))
+ amdgpu_uid_init(adev);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ if (ip_block->status.valid != false)
+ amdgpu_amdkfd_device_probe(adev);
adev->cg_flags &= amdgpu_cg_mask;
adev->pg_flags &= amdgpu_pg_mask;
@@ -1490,93 +2925,463 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_init(struct amdgpu_device *adev)
+static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
{
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
+ if (!adev->ip_blocks[i].status.sw)
continue;
- r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
- if (r) {
- DRM_ERROR("sw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- return r;
- }
- adev->ip_blocks[i].status.sw = true;
- /* need to do gmc hw init early so we can allocate gpu mem */
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
- r = amdgpu_vram_scratch_init(adev);
- if (r) {
- DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
- return r;
- }
- r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
- if (r) {
- DRM_ERROR("hw_init %d failed %d\n", i, r);
- return r;
- }
- r = amdgpu_wb_init(adev);
+ if (adev->ip_blocks[i].status.hw)
+ continue;
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
+ continue;
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
+ (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("amdgpu_wb_init failed %d\n", r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
adev->ip_blocks[i].status.hw = true;
+ }
+ }
- /* right after GMC hw init, we create CSA */
- if (amdgpu_sriov_vf(adev)) {
- r = amdgpu_allocate_static_csa(adev);
+ return 0;
+}
+
+static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.sw)
+ continue;
+ if (adev->ip_blocks[i].status.hw)
+ continue;
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
+ continue;
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
+ if (r) {
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+ adev->ip_blocks[i].status.hw = true;
+ }
+
+ return 0;
+}
+
+static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
+{
+ int r = 0;
+ int i;
+ uint32_t smu_version;
+
+ if (adev->asic_type >= CHIP_VEGA10) {
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
+ continue;
+
+ if (!amdgpu_ip_member_of_hwini(adev,
+ AMD_IP_BLOCK_TYPE_PSP))
+ break;
+
+ if (!adev->ip_blocks[i].status.sw)
+ continue;
+
+ /* no need to do the fw loading again if already done */
+ if (adev->ip_blocks[i].status.hw == true)
+ break;
+
+ if (amdgpu_in_reset(adev) || adev->in_suspend) {
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
+ return r;
+ } else {
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("allocate CSA failed %d\n", r);
+ dev_err(adev->dev,
+ "hw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i]
+ .version->funcs->name,
+ r);
return r;
}
+ adev->ip_blocks[i].status.hw = true;
}
+ break;
}
}
- for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.sw)
- continue;
- /* gmc hw init is done early */
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
+ if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
+ r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
+
+ return r;
+}
+
+static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
+{
+ struct drm_sched_init_args args = {
+ .ops = &amdgpu_sched_ops,
+ .num_rqs = DRM_SCHED_PRIORITY_COUNT,
+ .timeout_wq = adev->reset_domain->wq,
+ .dev = adev->dev,
+ };
+ long timeout;
+ int r, i;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ /* No need to setup the GPU scheduler for rings that don't need it */
+ if (!ring || ring->no_scheduler)
continue;
- r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ timeout = adev->gfx_timeout;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ timeout = adev->compute_timeout;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ timeout = adev->sdma_timeout;
+ break;
+ default:
+ timeout = adev->video_timeout;
+ break;
+ }
+
+ args.timeout = timeout;
+ args.credit_limit = ring->num_hw_submission;
+ args.score = ring->sched_score;
+ args.name = ring->name;
+
+ r = drm_sched_init(&ring->sched, &args);
if (r) {
- DRM_ERROR("hw_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "Failed to create scheduler on ring %s.\n",
+ ring->name);
+ return r;
+ }
+ r = amdgpu_uvd_entity_init(adev, ring);
+ if (r) {
+ dev_err(adev->dev,
+ "Failed to create UVD scheduling entity on ring %s.\n",
+ ring->name);
+ return r;
+ }
+ r = amdgpu_vce_entity_init(adev, ring);
+ if (r) {
+ dev_err(adev->dev,
+ "Failed to create VCE scheduling entity on ring %s.\n",
+ ring->name);
return r;
}
- adev->ip_blocks[i].status.hw = true;
}
+ if (adev->xcp_mgr)
+ amdgpu_xcp_update_partition_sched_list(adev);
+
return 0;
}
-static int amdgpu_late_init(struct amdgpu_device *adev)
+
+/**
+ * amdgpu_device_ip_init - run init for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main initialization pass for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the sw_init and hw_init callbacks
+ * are run. sw_init initializes the software state associated with each IP
+ * and hw_init initializes the hardware associated with each IP.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
- int i = 0, r;
+ bool init_badpage;
+ int i, r;
+
+ r = amdgpu_ras_init(adev);
+ if (r)
+ return r;
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
- if (adev->ip_blocks[i].version->funcs->late_init) {
- r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
+ if (adev->ip_blocks[i].version->funcs->sw_init) {
+ r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("late_init of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- return r;
+ dev_err(adev->dev,
+ "sw_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
+ goto init_failed;
+ }
+ }
+ adev->ip_blocks[i].status.sw = true;
+
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
+ continue;
+
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+ /* need to do common hw init early so everything is set up for gmc */
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
+ if (r) {
+ dev_err(adev->dev, "hw_init %d failed %d\n", i,
+ r);
+ goto init_failed;
+ }
+ adev->ip_blocks[i].status.hw = true;
+ } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ /* need to do gmc hw init early so we can allocate gpu mem */
+ /* Try to reserve bad pages early */
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_exchange_data(adev);
+
+ r = amdgpu_device_mem_scratch_init(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "amdgpu_mem_scratch_init failed %d\n",
+ r);
+ goto init_failed;
+ }
+ r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
+ if (r) {
+ dev_err(adev->dev, "hw_init %d failed %d\n", i,
+ r);
+ goto init_failed;
+ }
+ r = amdgpu_device_wb_init(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "amdgpu_device_wb_init failed %d\n", r);
+ goto init_failed;
+ }
+ adev->ip_blocks[i].status.hw = true;
+
+ /* right after GMC hw init, we create CSA */
+ if (adev->gfx.mcbp) {
+ r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_CSA_SIZE);
+ if (r) {
+ dev_err(adev->dev,
+ "allocate CSA failed %d\n", r);
+ goto init_failed;
+ }
+ }
+
+ r = amdgpu_seq64_init(adev);
+ if (r) {
+ dev_err(adev->dev, "allocate seq64 failed %d\n",
+ r);
+ goto init_failed;
+ }
+ }
+ }
+
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_init_data_exchange(adev);
+
+ r = amdgpu_ib_pool_init(adev);
+ if (r) {
+ dev_err(adev->dev, "IB initialization failed (%d).\n", r);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
+ goto init_failed;
+ }
+
+ r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
+ if (r)
+ goto init_failed;
+
+ r = amdgpu_device_ip_hw_init_phase1(adev);
+ if (r)
+ goto init_failed;
+
+ r = amdgpu_device_fw_loading(adev);
+ if (r)
+ goto init_failed;
+
+ r = amdgpu_device_ip_hw_init_phase2(adev);
+ if (r)
+ goto init_failed;
+
+ /*
+ * retired pages will be loaded from eeprom and reserved here,
+ * it should be called after amdgpu_device_ip_hw_init_phase2 since
+ * for some ASICs the RAS EEPROM code relies on the SMU being fully
+ * functional for I2C communication, which is only true at this point.
+ *
+ * amdgpu_ras_recovery_init may fail, but the upper layer only cares
+ * about failures caused by a bad gpu situation and stops the amdgpu
+ * init process accordingly. For other failure cases, it still releases
+ * all the resources and prints an error message rather than returning
+ * a negative value to the upper level.
+ *
+ * Note: theoretically, this should be called before all vram allocations
+ * to protect retired pages from being reused
+ */
+ init_badpage = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
+ r = amdgpu_ras_recovery_init(adev, init_badpage);
+ if (r)
+ goto init_failed;
+
+ /**
+ * In case of XGMI grab extra reference for reset domain for this device
+ */
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (amdgpu_xgmi_add_device(adev) == 0) {
+ if (!amdgpu_sriov_vf(adev)) {
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+
+ if (WARN_ON(!hive)) {
+ r = -ENOENT;
+ goto init_failed;
+ }
+
+ if (!hive->reset_domain ||
+ !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
+ r = -ENOENT;
+ amdgpu_put_xgmi_hive(hive);
+ goto init_failed;
+ }
+
+ /* Drop the early temporary reset domain we created for device */
+ amdgpu_reset_put_reset_domain(adev->reset_domain);
+ adev->reset_domain = hive->reset_domain;
+ amdgpu_put_xgmi_hive(hive);
}
- adev->ip_blocks[i].status.late_initialized = true;
}
+ }
+
+ r = amdgpu_device_init_schedulers(adev);
+ if (r)
+ goto init_failed;
+
+ if (adev->mman.buffer_funcs_ring->sched.ready)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+
+ /* Don't init kfd if whole hive need to be reset during init */
+ if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
+ kgd2kfd_init_zone_device(adev);
+ amdgpu_amdkfd_device_init(adev);
+ }
+
+ amdgpu_fru_get_product_info(adev);
+
+ if (!amdgpu_sriov_vf(adev) || amdgpu_sriov_ras_cper_en(adev))
+ r = amdgpu_cper_init(adev);
+
+init_failed:
+
+ return r;
+}
+
+/**
+ * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Writes a reset magic value to the gart pointer in VRAM. The driver calls
+ * this function before a GPU reset. If the value is retained after a
+ * GPU reset, VRAM has not been lost. Some GPU resets may destroy VRAM contents.
+ */
+static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
+{
+ memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
+}
+
+/**
+ * amdgpu_device_check_vram_lost - check if vram is valid
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Checks the reset magic value written to the gart pointer in VRAM.
+ * The driver calls this after a GPU reset to see if the contents of
+ * VRAM is lost or not.
+ * Returns true if vram is lost, false if not.
+ */
+static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
+{
+ if (memcmp(adev->gart.ptr, adev->reset_magic,
+ AMDGPU_RESET_MAGIC_NUM))
+ return true;
+
+ if (!amdgpu_in_reset(adev))
+ return false;
+
+ /*
+ * For all ASICs with baco/mode1 reset, the VRAM is
+ * always assumed to be lost.
+ */
+ switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_LEGACY:
+ case AMD_RESET_METHOD_LINK:
+ case AMD_RESET_METHOD_BACO:
+ case AMD_RESET_METHOD_MODE1:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * amdgpu_device_set_cg_state - set clockgating for amdgpu device
+ *
+ * @adev: amdgpu_device pointer
+ * @state: clockgating state (gate or ungate)
+ *
+ * The list of all the hardware IPs that make up the asic is walked and the
+ * set_clockgating_state callbacks are run.
+ * During the late init pass this enables clockgating for hardware IPs;
+ * during the fini or suspend pass it disables clockgating for hardware IPs.
+ * Returns 0 on success, negative error code on failure.
+ */
+
+int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ int i, j, r;
+
+ if (amdgpu_emu_mode == 1)
+ return 0;
+
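+ /* gate in IP block order, ungate in reverse order */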
+ for (j = 0; j < adev->num_ip_blocks; j++) {
+ i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
+ if (!adev->ip_blocks[i].status.late_initialized)
+ continue;
+ /* skip CG for GFX, SDMA on S0ix */
+ if (adev->in_s0ix &&
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
- adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
+ adev->ip_blocks[i].version->funcs->set_clockgating_state) {
/* enable clockgating to save power */
- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_GATE);
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
+ state);
if (r) {
- DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_clockgating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
@@ -1585,72 +3390,308 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_fini(struct amdgpu_device *adev)
+int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
+ enum amd_powergating_state state)
{
- int i, r;
+ int i, j, r;
- /* need to disable SMC first */
- for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.hw)
+ if (amdgpu_emu_mode == 1)
+ return 0;
+
+ for (j = 0; j < adev->num_ip_blocks; j++) {
+ i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
+ if (!adev->ip_blocks[i].status.late_initialized)
continue;
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- /* ungate blocks before hw fini so that we can shutdown the blocks safely */
- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
+ /* skip PG for GFX, SDMA on S0ix */
+ if (adev->in_s0ix &&
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+ /* skip CG for VCE/UVD, it's handled specially */
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
+ adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
+ adev->ip_blocks[i].version->funcs->set_powergating_state) {
+ /* enable powergating to save power */
+ r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
+ state);
if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "set_powergating_state(gate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
- r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
- /* XXX handle errors */
- if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- }
- adev->ip_blocks[i].status.hw = false;
- break;
}
}
+ return 0;
+}
- for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
- if (!adev->ip_blocks[i].status.hw)
- continue;
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
- amdgpu_wb_fini(adev);
- amdgpu_vram_scratch_fini(adev);
+static int amdgpu_device_enable_mgpu_fan_boost(void)
+{
+ struct amdgpu_gpu_instance *gpu_ins;
+ struct amdgpu_device *adev;
+ int i, ret = 0;
+
+ mutex_lock(&mgpu_info.mutex);
+
+ /*
+ * MGPU fan boost feature should be enabled
+ * only when there are two or more dGPUs in
+ * the system
+ */
+ if (mgpu_info.num_dgpu < 2)
+ goto out;
+
+ for (i = 0; i < mgpu_info.num_dgpu; i++) {
+ gpu_ins = &(mgpu_info.gpu_ins[i]);
+ adev = gpu_ins->adev;
+ if (!(adev->flags & AMD_IS_APU || amdgpu_sriov_multi_vf_mode(adev)) &&
+ !gpu_ins->mgpu_fan_enabled) {
+ ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
+ if (ret)
+ break;
+
+ gpu_ins->mgpu_fan_enabled = 1;
}
+ }
- if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
- adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
- /* ungate blocks before hw fini so that we can shutdown the blocks safely */
- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
+out:
+ mutex_unlock(&mgpu_info.mutex);
+
+ return ret;
+}
+
+/**
+ * amdgpu_device_ip_late_init - run late init for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Late initialization pass for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the late_init callbacks are run.
+ * late_init covers any special initialization that an IP requires
+ * after all of the IPs have been initialized or something that needs to happen
+ * late in the init process.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_gpu_instance *gpu_instance;
+ int i = 0, r;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.hw)
+ continue;
+ if (adev->ip_blocks[i].version->funcs->late_init) {
+ r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_err(adev->dev,
+ "late_init of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
return r;
}
}
+ adev->ip_blocks[i].status.late_initialized = true;
+ }
+
+ r = amdgpu_ras_late_init(adev);
+ if (r) {
+ dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r);
+ return r;
+ }
+
+ if (!amdgpu_reset_in_recovery(adev))
+ amdgpu_ras_set_error_query_ready(adev, true);
+
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+
+ amdgpu_device_fill_reset_magic(adev);
+
+ r = amdgpu_device_enable_mgpu_fan_boost();
+ if (r)
+ dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r);
+
+ /* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
+ if (amdgpu_passthrough(adev) &&
+ ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1) ||
+ adev->asic_type == CHIP_ALDEBARAN))
+ amdgpu_dpm_handle_passthrough_sbr(adev, true);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ mutex_lock(&mgpu_info.mutex);
+
+ /*
+ * Reset device p-state to low as this was booted with high.
+ *
+ * This should be performed only after all devices from the same
+ * hive get initialized.
+ *
+ * However, the number of devices in a hive is not known in advance,
+ * as they are counted one by one during device initialization.
+ *
+ * So we wait until all XGMI interlinked devices have initialized.
+ * This may add some delay as those devices may come from
+ * different hives. But that should be OK.
+ */
+ if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
+ for (i = 0; i < mgpu_info.num_gpu; i++) {
+ gpu_instance = &(mgpu_info.gpu_ins[i]);
+ if (gpu_instance->adev->flags & AMD_IS_APU)
+ continue;
+
+ r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
+ AMDGPU_XGMI_PSTATE_MIN);
+ if (r) {
+ dev_err(adev->dev,
+ "pstate setting failed (%d).\n",
+ r);
+ break;
+ }
+ }
+ }
+
+ mutex_unlock(&mgpu_info.mutex);
+ }
+
+ return 0;
+}
- r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
+static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ if (!ip_block->version->funcs->hw_fini) {
+ dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n",
+ ip_block->version->funcs->name);
+ } else {
+ r = ip_block->version->funcs->hw_fini(ip_block);
/* XXX handle errors */
if (r) {
- DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ dev_dbg(adev->dev,
+ "hw_fini of IP block <%s> failed %d\n",
+ ip_block->version->funcs->name, r);
}
+ }
- adev->ip_blocks[i].status.hw = false;
+ ip_block->status.hw = false;
+}
+
+/**
+ * amdgpu_device_smu_fini_early - smu hw_fini wrapper
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * For ASICs that need to disable SMC first
+ */
+static void amdgpu_device_smu_fini_early(struct amdgpu_device *adev)
+{
+ int i;
+
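+ /* only GC 9.0.0 and older need the SMC shut down before other IP blocks */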
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
+ return;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.hw)
+ continue;
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
+ break;
+ }
}
+}
+
+static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].version->funcs->early_fini)
+ continue;
+
+ r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
+ if (r) {
+ dev_dbg(adev->dev,
+ "early_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ }
+ }
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+ amdgpu_amdkfd_suspend(adev, true);
+ amdgpu_userq_suspend(adev);
+
+ /* Workaround for ASICs need to disable SMC first */
+ amdgpu_device_smu_fini_early(adev);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.hw)
+ continue;
+
+ amdgpu_ip_block_hw_fini(&adev->ip_blocks[i]);
+ }
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_virt_release_full_gpu(adev, false))
+ dev_err(adev->dev,
+ "failed to release exclusive mode on fini\n");
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_device_ip_fini - run fini for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main teardown pass for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
+ * are run. hw_fini tears down the hardware associated with each IP
+ * and sw_fini tears down any software state associated with each IP.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ amdgpu_cper_fini(adev);
+
+ if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
+ amdgpu_virt_release_ras_err_handler_data(adev);
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_remove_device(adev);
+
+ amdgpu_amdkfd_device_fini_sw(adev);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.sw)
continue;
- r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
- /* XXX handle errors */
- if (r) {
- DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ amdgpu_ucode_free_bo(adev);
+ amdgpu_free_static_csa(&adev->virt.csa_obj);
+ amdgpu_device_wb_fini(adev);
+ amdgpu_device_mem_scratch_fini(adev);
+ amdgpu_ib_pool_fini(adev);
+ amdgpu_seq64_fini(adev);
+ amdgpu_doorbell_fini(adev);
+ }
+ if (adev->ip_blocks[i].version->funcs->sw_fini) {
+ r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
+ /* XXX handle errors */
+ if (r) {
+ dev_dbg(adev->dev,
+ "sw_fini of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
+ }
}
adev->ip_blocks[i].status.sw = false;
adev->ip_blocks[i].status.valid = false;
@@ -1660,141 +3701,693 @@ static int amdgpu_fini(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.late_initialized)
continue;
if (adev->ip_blocks[i].version->funcs->late_fini)
- adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
+ adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
adev->ip_blocks[i].status.late_initialized = false;
}
- if (amdgpu_sriov_vf(adev)) {
- amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
- amdgpu_virt_release_full_gpu(adev, false);
- }
+ amdgpu_ras_fini(adev);
+ amdgpu_uid_fini(adev);
return 0;
}
-int amdgpu_suspend(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_delayed_init_work_handler - work handler for IB tests
+ *
+ * @work: work_struct.
+ */
+static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, delayed_init_work.work);
+ int r;
+
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ dev_err(adev->dev, "ib ring test failed (%d).\n", r);
+}
+
+static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
+
+ WARN_ON_ONCE(adev->gfx.gfx_off_state);
+ WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
+
+ if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true, 0))
+ adev->gfx.gfx_off_state = true;
+}
+
+/**
+ * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run. suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_request_full_gpu(adev, false);
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
- /* ungate SMC block first */
- r = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
- AMD_CG_STATE_UNGATE);
- if (r) {
- DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n",r);
+ /*
+ * Per the PMFW team's suggestion, the driver needs to handle disabling
+ * the gfxoff and df cstate features for the gpu reset (e.g. Mode1Reset)
+ * scenario. Add the missing df cstate disablement here.
+ */
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ dev_warn(adev->dev, "Failed to disallow df cstate");
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+
+ /* displays are handled separately */
+ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
+ continue;
+
+ /* XXX handle errors */
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ if (r)
+ return r;
}
+ return 0;
+}
+
+/**
+ * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run. suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ if (adev->in_s0ix)
+ amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry);
+
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
- /* ungate blocks so that suspend can properly shut them down */
- if (i != AMD_IP_BLOCK_TYPE_SMC) {
- r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
- AMD_CG_STATE_UNGATE);
- if (r) {
- DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- }
+ /* displays are handled in phase1 */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
+ continue;
+ /* PSP lost connection when err_event_athub occurs */
+ if (amdgpu_ras_intr_triggered() &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
+ adev->ip_blocks[i].status.hw = false;
+ continue;
}
+
+ /* skip unnecessary suspend if we have not initialized them yet */
+ if (!amdgpu_ip_member_of_hwini(
+ adev, adev->ip_blocks[i].version->type))
+ continue;
+
+ /* Since we skip suspend for S0i3, we need to cancel the delayed
+ * idle work here as the suspend callback never gets called.
+ */
+ if (adev->in_s0ix &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
+ /* skip suspend of gfx/mes and psp for S0ix
+ * gfx is in gfxoff state, so on resume it will exit gfxoff just
+ * like at runtime. PSP is also part of the always on hardware
+ * so no need to suspend it.
+ */
+ if (adev->in_s0ix &&
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
+ continue;
+
+ /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
+ if (adev->in_s0ix &&
+ (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
+ IP_VERSION(5, 0, 0)) &&
+ (adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+
+ /* During cold boot, swPSP provides the IMU and RLC FW binaries to TOS.
+ * These live in TMR, hence are expected to be reused by PSP-TOS to reload
+ * from this location, and RLC Autoload also gets loaded automatically
+ * from here based on the PMFW -> PSP message during the re-init sequence.
+ * Therefore, psp suspend & resume should be skipped to avoid destroying
+ * the TMR and reloading the FWs again for IMU enabled APU ASICs.
+ */
+ if (amdgpu_in_reset(adev) &&
+ (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
+ continue;
+
/* XXX handle errors */
- r = adev->ip_blocks[i].version->funcs->suspend(adev);
- /* XXX handle errors */
- if (r) {
- DRM_ERROR("suspend of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ adev->ip_blocks[i].status.hw = false;
+
+ /* handle putting the SMC in the appropriate state */
+ if (!amdgpu_sriov_vf(adev)) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
+ if (r) {
+ dev_err(adev->dev,
+ "SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
+ return r;
+ }
+ }
}
}
+ return 0;
+}
+
+/**
+ * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run. suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_virt_request_full_gpu(adev, false);
+ }
+
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
+ r = amdgpu_device_ip_suspend_phase1(adev);
+ if (r)
+ return r;
+ r = amdgpu_device_ip_suspend_phase2(adev);
+
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
- return 0;
+ return r;
}
-static int amdgpu_sriov_reinit_early(struct amdgpu_device *adev)
+static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
int i, r;
+ static enum amd_ip_block_type ip_order[] = {
+ AMD_IP_BLOCK_TYPE_COMMON,
+ AMD_IP_BLOCK_TYPE_GMC,
+ AMD_IP_BLOCK_TYPE_PSP,
+ AMD_IP_BLOCK_TYPE_IH,
+ };
+
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
+ int j;
+ struct amdgpu_ip_block *block;
+
+ block = &adev->ip_blocks[i];
+ block->status.hw = false;
+
+ for (j = 0; j < ARRAY_SIZE(ip_order); j++) {
+
+ if (block->version->type != ip_order[j] ||
+ !block->status.valid)
+ continue;
+
+ r = block->version->funcs->hw_init(&adev->ip_blocks[i]);
+ if (r) {
+ dev_err(adev->dev, "RE-INIT-early: %s failed\n",
+ block->version->funcs->name);
+ return r;
+ }
+ block->status.hw = true;
+ }
+ }
+
+ return 0;
+}
+
+static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
+{
+ struct amdgpu_ip_block *block;
+ int i, r = 0;
+
+ static enum amd_ip_block_type ip_order[] = {
+ AMD_IP_BLOCK_TYPE_SMC,
+ AMD_IP_BLOCK_TYPE_DCE,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_IP_BLOCK_TYPE_SDMA,
+ AMD_IP_BLOCK_TYPE_MES,
+ AMD_IP_BLOCK_TYPE_UVD,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_IP_BLOCK_TYPE_VCN,
+ AMD_IP_BLOCK_TYPE_JPEG
+ };
+
+ for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
+ block = amdgpu_device_ip_get_ip_block(adev, ip_order[i]);
+
+ if (!block)
continue;
+ if (block->status.valid && !block->status.hw) {
+ if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ r = amdgpu_ip_block_resume(block);
+ } else {
+ r = block->version->funcs->hw_init(block);
+ }
+
+ if (r) {
+ dev_err(adev->dev, "RE-INIT-late: %s failed\n",
+ block->version->funcs->name);
+ break;
+ }
+ block->status.hw = true;
+ }
+ }
+
+ return r;
+}
+
+/**
+ * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * First resume function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the resume callbacks are run for
+ * COMMON, GMC, and IH. resume puts the hardware into a functional state
+ * after a suspend and updates the software state as necessary. This
+ * function is also used for restoring the GPU after a GPU reset.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
+ continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
- r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP && amdgpu_sriov_vf(adev))) {
- if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- return r;
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
+ return r;
}
}
return 0;
}
-static int amdgpu_sriov_reinit_late(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Second resume function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the resume callbacks are run for
+ * all blocks except COMMON, GMC, and IH. resume puts the hardware into a
+ * functional state after a suspend and updates the software state as
+ * necessary. This function is also used for restoring the GPU after a GPU
+ * reset.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
+ if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
continue;
-
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH )
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
continue;
-
- r = adev->ip_blocks[i].version->funcs->hw_init(adev);
- if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
return r;
- }
}
return 0;
}
-static int amdgpu_resume(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_resume_phase3 - run resume for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Third resume function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked and the resume callbacks are run for
+ * all DCE. resume puts the hardware into a functional state after a suspend
+ * and updates the software state as necessary. This function is also used
+ * for restoring the GPU after a GPU reset.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_resume_phase3(struct amdgpu_device *adev)
{
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
- if (!adev->ip_blocks[i].status.valid)
+ if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
continue;
- r = adev->ip_blocks[i].version->funcs->resume(adev);
- if (r) {
- DRM_ERROR("resume of IP block <%s> failed %d\n",
- adev->ip_blocks[i].version->funcs->name, r);
- return r;
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+ r = amdgpu_ip_block_resume(&adev->ip_blocks[i]);
+ if (r)
+ return r;
}
}
return 0;
}
+/**
+ * amdgpu_device_ip_resume - run resume for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main resume function for hardware IPs. The hardware IPs
+ * are split into several resume functions because they are
+ * also used in recovering from a GPU reset and some additional
+ * steps need to be taken between them. In this case (S3/S4) they are
+ * run sequentially.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_device_ip_resume_phase1(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_device_fw_loading(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_device_ip_resume_phase2(adev);
+
+ if (adev->mman.buffer_funcs_ring->sched.ready)
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+
+ if (r)
+ return r;
+
+ amdgpu_fence_driver_hw_init(adev);
+
+ r = amdgpu_device_ip_resume_phase3(adev);
+
+ return r;
+}
+
+/**
+ * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Query the VBIOS data tables to determine if the board supports SR-IOV.
+ */
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
- if (adev->is_atom_fw) {
- if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+ if (amdgpu_sriov_vf(adev)) {
+ if (adev->is_atom_fw) {
+ if (amdgpu_atomfirmware_gpu_virtualization_supported(adev))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+ } else {
+ if (amdgpu_atombios_has_gpu_virtualization_table(adev))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+ }
+
+ if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
+ }
+}
+
+/**
+ * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
+ *
+ * @pdev : pci device context
+ * @asic_type: AMD asic type
+ *
+ * Check if there is DC (new modesetting infrastructure) support for an asic.
+ * Returns true if DC has support, false if not.
+ */
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+ enum amd_asic_type asic_type)
+{
+ switch (asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_HAINAN:
+#endif
+ case CHIP_TOPAZ:
+ /* chips with no display hardware */
+ return false;
+#if defined(CONFIG_DRM_AMD_DC)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ /*
+ * We have systems in the wild with these ASICs that require
+ * LVDS and VGA support which is not supported with DC.
+ *
+ * Fallback to the non-DC driver here by default so as not to
+ * cause regressions.
+ */
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ return amdgpu_dc > 0;
+#else
+ return false;
+#endif
+ case CHIP_BONAIRE:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ /*
+ * We have systems in the wild with these ASICs that require
+ * VGA support which is not supported with DC.
+ *
+ * Fallback to the non-DC driver here by default so as not to
+ * cause regressions.
+ */
+ return amdgpu_dc > 0;
+ default:
+ return amdgpu_dc != 0;
+#else
+ default:
+ if (amdgpu_dc > 0)
+ dev_info_once(
+ &pdev->dev,
+ "Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
+ return false;
+#endif
+ }
+}
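
For illustration only (not part of the patch): the policy encoded above can be summarised as "legacy SI/CIK parts need an explicit opt-in, everything else defaults to DC". The standalone sketch below restates that rule; wants_dc() is a hypothetical helper, and the assumption that amdgpu.dc defaults to -1 ("auto") reflects the module parameter's usual default rather than anything in this hunk.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical restatement of amdgpu_device_asic_has_dc_support():
 * "legacy" stands for the SI/CIK ASICs that may need LVDS/VGA support,
 * dc mirrors the amdgpu.dc parameter (-1 = auto, 0 = off, 1 = on). */
static bool wants_dc(bool legacy, int dc)
{
	return legacy ? dc > 0 : dc != 0;
}

int main(void)
{
	int dc;

	for (dc = -1; dc <= 1; dc++)
		printf("dc=%2d  legacy:%d  modern:%d\n",
		       dc, wants_dc(true, dc), wants_dc(false, dc));
	return 0;
}
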
+
+/**
+ * amdgpu_device_has_dc_support - check if dc is supported
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true for supported, false for not supported
+ */
+bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
+{
+ if (adev->enable_virtual_display ||
+ (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
+ return false;
+
+ return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type);
+}
+
+static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
+{
+ struct amdgpu_device *adev =
+ container_of(__work, struct amdgpu_device, xgmi_reset_work);
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+
+ /* It's a bug to not have a hive within this function */
+ if (WARN_ON(!hive))
+ return;
+
+ /*
+ * Use task barrier to synchronize all xgmi reset works across the
+ * hive. task_barrier_enter and task_barrier_exit will block
+ * until all the threads running the xgmi reset works reach
+ * those points. task_barrier_full will do both blocks.
+ */
+ if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+
+ task_barrier_enter(&hive->tb);
+ adev->asic_reset_res = amdgpu_device_baco_enter(adev);
+
+ if (adev->asic_reset_res)
+ goto fail;
+
+ task_barrier_exit(&hive->tb);
+ adev->asic_reset_res = amdgpu_device_baco_exit(adev);
+
+ if (adev->asic_reset_res)
+ goto fail;
+
+ amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__MMHUB);
} else {
- if (amdgpu_atombios_has_gpu_virtualization_table(adev))
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
+
+ task_barrier_full(&hive->tb);
+ adev->asic_reset_res = amdgpu_asic_reset(adev);
+ }
+
+fail:
+ if (adev->asic_reset_res)
+ dev_warn(adev->dev,
+ "ASIC reset failed with error, %d for drm dev, %s",
+ adev->asic_reset_res, adev_to_drm(adev)->unique);
+ amdgpu_put_xgmi_hive(hive);
+}
+
+static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
+{
+ char *input = amdgpu_lockup_timeout;
+ char *timeout_setting = NULL;
+ int index = 0;
+ long timeout;
+ int ret = 0;
+
+ /*
+ * By default, the timeout for jobs is 10 seconds.
+ */
+ adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000);
+ adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+
+ if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ while ((timeout_setting = strsep(&input, ",")) &&
+ strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
+ ret = kstrtol(timeout_setting, 0, &timeout);
+ if (ret)
+ return ret;
+
+ if (timeout == 0) {
+ index++;
+ continue;
+ } else if (timeout < 0) {
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ dev_warn(adev->dev, "lockup timeout disabled");
+ add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
+ } else {
+ timeout = msecs_to_jiffies(timeout);
+ }
+
+ switch (index++) {
+ case 0:
+ adev->gfx_timeout = timeout;
+ break;
+ case 1:
+ adev->compute_timeout = timeout;
+ break;
+ case 2:
+ adev->sdma_timeout = timeout;
+ break;
+ case 3:
+ adev->video_timeout = timeout;
+ break;
+ default:
+ break;
+ }
+ }
+ /*
+ * There is only one value specified and
+ * it should apply to all non-compute jobs.
+ */
+ if (index == 1) {
+ adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
+ if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+ adev->compute_timeout = adev->gfx_timeout;
+ }
}
+
+ return ret;
+}
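
As an aside (not part of the patch), the parsing rules above accept up to four comma-separated values for amdgpu.lockup_timeout, mapping to the gfx, compute, sdma and video queues; 0 keeps the 10 second default, a negative value disables the timeout, and a single value is applied to all non-compute queues (and also to compute under SR-IOV or passthrough). The standalone sketch below mimics those rules in user space; the input string is hypothetical.

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char input[] = "10000,30000,0,-1";      /* hypothetical parameter string */
	const char *names[] = { "gfx", "compute", "sdma", "video" };
	long ms[4] = { 10000, 10000, 10000, 10000 };   /* 10 s defaults */
	char *s = input, *tok;
	int index = 0;

	while ((tok = strsep(&s, ",")) && *tok && index < 4) {
		long v = strtol(tok, NULL, 0);

		if (v < 0)
			ms[index] = -1;         /* timeout disabled */
		else if (v > 0)
			ms[index] = v;          /* 0 keeps the default */
		index++;
	}
	if (index == 1)                         /* one value: all non-compute queues */
		ms[2] = ms[3] = ms[0];

	for (index = 0; index < 4; index++)
		printf("%s_timeout = %ld ms\n", names[index], ms[index]);
	return 0;
}
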
+
+/**
+ * amdgpu_device_check_iommu_direct_map - check if RAM direct mapped to GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * RAM is direct mapped to the GPU if the IOMMU is not enabled or is in pass-through mode
+ */
+static void amdgpu_device_check_iommu_direct_map(struct amdgpu_device *adev)
+{
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev(adev->dev);
+ if (!domain || domain->type == IOMMU_DOMAIN_IDENTITY)
+ adev->ram_is_direct_mapped = true;
+}
+
+#if defined(CONFIG_HSA_AMD_P2P)
+/**
+ * amdgpu_device_check_iommu_remap - Check if DMA remapping is enabled.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the IOMMU is remapping the BAR address, false otherwise.
+ */
+static bool amdgpu_device_check_iommu_remap(struct amdgpu_device *adev)
+{
+ struct iommu_domain *domain;
+
+ domain = iommu_get_domain_for_dev(adev->dev);
+ if (domain && (domain->type == IOMMU_DOMAIN_DMA ||
+ domain->type == IOMMU_DOMAIN_DMA_FQ))
+ return true;
+
+ return false;
+}
+#endif
+
+static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
+{
+ if (amdgpu_mcbp == 1)
+ adev->gfx.mcbp = true;
+ else if (amdgpu_mcbp == 0)
+ adev->gfx.mcbp = false;
+
+ if (amdgpu_sriov_vf(adev))
+ adev->gfx.mcbp = true;
+
+ if (adev->gfx.mcbp)
+ dev_info(adev->dev, "MCBP is enabled\n");
}
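
A quick restatement (illustrative, not from the patch) of the MCBP policy set above: amdgpu.mcbp of 1 forces mid-command-buffer preemption on, 0 forces it off, any other value (typically the -1 "auto" default) leaves the earlier per-ASIC setting alone, and SR-IOV VFs always end up with MCBP enabled. mcbp_enabled() below is a hypothetical helper capturing only that decision.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical condensation of amdgpu_device_set_mcbp(): "prior" is
 * whatever earlier per-ASIC code chose, param mirrors amdgpu.mcbp. */
static bool mcbp_enabled(bool prior, int param, bool is_sriov_vf)
{
	bool mcbp = prior;

	if (param == 1)
		mcbp = true;
	else if (param == 0)
		mcbp = false;
	if (is_sriov_vf)
		mcbp = true;
	return mcbp;
}

int main(void)
{
	printf("bare metal, auto, prior off: %d\n", mcbp_enabled(false, -1, false));
	printf("bare metal, mcbp=0         : %d\n", mcbp_enabled(true, 0, false));
	printf("SR-IOV VF,  mcbp=0         : %d\n", mcbp_enabled(true, 0, true));
	return 0;
}
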
/**
* amdgpu_device_init - initialize the driver
*
* @adev: amdgpu_device pointer
- * @pdev: drm dev pointer
- * @pdev: pci dev pointer
* @flags: driver flags
*
* Initializes the driver info and hw (all asics).
@@ -1802,37 +4395,50 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
* Called at driver startup.
*/
int amdgpu_device_init(struct amdgpu_device *adev,
- struct drm_device *ddev,
- struct pci_dev *pdev,
uint32_t flags)
{
+ struct pci_dev *pdev = adev->pdev;
int r, i;
- bool runtime = false;
+ bool px = false;
u32 max_MBps;
+ int tmp;
adev->shutdown = false;
- adev->dev = &pdev->dev;
- adev->ddev = ddev;
- adev->pdev = pdev;
adev->flags = flags;
- adev->asic_type = flags & AMD_ASIC_MASK;
+
+ if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
+ adev->asic_type = amdgpu_force_asic_type;
+ else
+ adev->asic_type = flags & AMD_ASIC_MASK;
+
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
- adev->mc.gtt_size = 512 * 1024 * 1024;
+ if (amdgpu_emu_mode == 1)
+ adev->usec_timeout *= 10;
+ adev->gmc.gart_size = 512 * 1024 * 1024;
adev->accel_working = false;
adev->num_rings = 0;
+ RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
adev->mman.buffer_funcs = NULL;
adev->mman.buffer_funcs_ring = NULL;
adev->vm_manager.vm_pte_funcs = NULL;
- adev->vm_manager.vm_pte_num_rings = 0;
- adev->gart.gart_funcs = NULL;
+ adev->vm_manager.vm_pte_num_scheds = 0;
+ adev->gmc.gmc_funcs = NULL;
+ adev->harvest_ip_mask = 0x0;
adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
+ bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
adev->smc_rreg = &amdgpu_invalid_rreg;
adev->smc_wreg = &amdgpu_invalid_wreg;
adev->pcie_rreg = &amdgpu_invalid_rreg;
adev->pcie_wreg = &amdgpu_invalid_wreg;
+ adev->pcie_rreg_ext = &amdgpu_invalid_rreg_ext;
+ adev->pcie_wreg_ext = &amdgpu_invalid_wreg_ext;
adev->pciep_rreg = &amdgpu_invalid_rreg;
adev->pciep_wreg = &amdgpu_invalid_wreg;
+ adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
+ adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
+ adev->pcie_rreg64_ext = &amdgpu_invalid_rreg64_ext;
+ adev->pcie_wreg64_ext = &amdgpu_invalid_wreg64_ext;
adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
adev->didt_rreg = &amdgpu_invalid_rreg;
@@ -1842,41 +4448,112 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
-
- DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
- amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
- pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
+ dev_info(
+ adev->dev,
+ "initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+ amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
/* mutex initialization are all done here so we
- * can recall function without having locking issues */
- atomic_set(&adev->irq.ih.lock, 0);
+ * can recall function without having locking issues
+ */
mutex_init(&adev->firmware.mutex);
mutex_init(&adev->pm.mutex);
mutex_init(&adev->gfx.gpu_clock_mutex);
mutex_init(&adev->srbm_mutex);
+ mutex_init(&adev->gfx.pipe_reserve_mutex);
+ mutex_init(&adev->gfx.gfx_off_mutex);
+ mutex_init(&adev->gfx.partition_mutex);
mutex_init(&adev->grbm_idx_mutex);
mutex_init(&adev->mn_lock);
+ mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
+ mutex_init(&adev->psp.mutex);
+ mutex_init(&adev->notifier_lock);
+ mutex_init(&adev->pm.stable_pstate_ctx_lock);
+ mutex_init(&adev->benchmark_mutex);
+ mutex_init(&adev->gfx.reset_sem_mutex);
+ /* Initialize the mutex for cleaner shader isolation between GFX and compute processes */
+ mutex_init(&adev->enforce_isolation_mutex);
+ for (i = 0; i < MAX_XCP; ++i) {
+ adev->isolation[i].spearhead = dma_fence_get_stub();
+ amdgpu_sync_create(&adev->isolation[i].active);
+ amdgpu_sync_create(&adev->isolation[i].prev);
+ }
+ mutex_init(&adev->gfx.userq_sch_mutex);
+ mutex_init(&adev->gfx.workload_profile_mutex);
+ mutex_init(&adev->vcn.workload_profile_mutex);
+ mutex_init(&adev->userq_mutex);
+
+ amdgpu_device_init_apu_flags(adev);
+
+ r = amdgpu_device_check_arguments(adev);
+ if (r)
+ return r;
- amdgpu_check_arguments(adev);
-
- /* Registers mapping */
- /* TODO: block userspace mapping of io register */
spin_lock_init(&adev->mmio_idx_lock);
spin_lock_init(&adev->smc_idx_lock);
spin_lock_init(&adev->pcie_idx_lock);
spin_lock_init(&adev->uvd_ctx_idx_lock);
spin_lock_init(&adev->didt_idx_lock);
spin_lock_init(&adev->gc_cac_idx_lock);
+ spin_lock_init(&adev->se_cac_idx_lock);
spin_lock_init(&adev->audio_endpt_idx_lock);
spin_lock_init(&adev->mm_stats.lock);
+ spin_lock_init(&adev->virt.rlcg_reg_lock);
+ spin_lock_init(&adev->wb.lock);
+
+ xa_init_flags(&adev->userq_xa, XA_FLAGS_LOCK_IRQ);
+
+ INIT_LIST_HEAD(&adev->reset_list);
+
+ INIT_LIST_HEAD(&adev->ras_list);
+
+ INIT_LIST_HEAD(&adev->pm.od_kobj_list);
+
+ INIT_LIST_HEAD(&adev->userq_mgr_list);
+
+ INIT_DELAYED_WORK(&adev->delayed_init_work,
+ amdgpu_device_delayed_init_work_handler);
+ INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
+ amdgpu_device_delay_enable_gfx_off);
+ /*
+ * Initialize the enforce_isolation work structures for each XCP
+ * partition. This work handler is responsible for enforcing shader
+ * isolation on AMD GPUs. It counts the number of emitted fences for
+ * each GFX and compute ring. If there are any fences, it schedules
+ * the `enforce_isolation_work` to be run after a delay. If there are
+ * no fences, it signals the Kernel Fusion Driver (KFD) to resume the
+ * runqueue.
+ */
+ for (i = 0; i < MAX_XCP; i++) {
+ INIT_DELAYED_WORK(&adev->gfx.enforce_isolation[i].work,
+ amdgpu_gfx_enforce_isolation_handler);
+ adev->gfx.enforce_isolation[i].adev = adev;
+ adev->gfx.enforce_isolation[i].xcp_id = i;
+ }
+
+ INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
- INIT_LIST_HEAD(&adev->shadow_list);
- mutex_init(&adev->shadow_list_lock);
+ adev->gfx.gfx_off_req_count = 1;
+ adev->gfx.gfx_off_residency = 0;
+ adev->gfx.gfx_off_entrycount = 0;
+ adev->pm.ac_power = power_supply_is_system_supplied() > 0;
- INIT_LIST_HEAD(&adev->gtt_list);
- spin_lock_init(&adev->gtt_list_lock);
+ atomic_set(&adev->throttling_logging_enabled, 1);
+ /*
+ * If throttling continues, logging will be performed every minute
+ * to avoid log flooding. "-1" is subtracted since the thermal
+ * throttling interrupt comes every second. Thus, the total logging
+ * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
+ * for the throttling interrupt) = 60 seconds.
+ */
+ ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
+
+ ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);
+ /* Registers mapping */
+ /* TODO: block userspace mapping of io register */
if (adev->asic_type >= CHIP_BONAIRE) {
adev->rmmio_base = pci_resource_start(adev->pdev, 5);
adev->rmmio_size = pci_resource_len(adev->pdev, 5);
@@ -1885,110 +4562,215 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->rmmio_size = pci_resource_len(adev->pdev, 2);
}
+ for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++)
+ atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN);
+
adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
- if (adev->rmmio == NULL) {
+ if (!adev->rmmio)
return -ENOMEM;
- }
- DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
- DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- if (adev->asic_type >= CHIP_BONAIRE)
- /* doorbell bar mapping */
- amdgpu_doorbell_init(adev);
-
- /* io port mapping */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
- adev->rio_mem_size = pci_resource_len(adev->pdev, i);
- adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
- break;
- }
+ dev_info(adev->dev, "register mmio base: 0x%08X\n",
+ (uint32_t)adev->rmmio_base);
+ dev_info(adev->dev, "register mmio size: %u\n",
+ (unsigned int)adev->rmmio_size);
+
+ /*
+ * Reset domain needs to be present early, before XGMI hive discovered
+ * (if any) and initialized to use reset sem and in_gpu reset flag
+ * early on during init and before calling to RREG32.
+ */
+ adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
+ if (!adev->reset_domain)
+ return -ENOMEM;
+
+ /* detect hw virtualization here */
+ amdgpu_virt_init(adev);
+
+ amdgpu_device_get_pcie_info(adev);
+
+ r = amdgpu_device_get_job_timeout_settings(adev);
+ if (r) {
+ dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
+ return r;
}
- if (adev->rio_mem == NULL)
- DRM_INFO("PCI I/O BAR is not found.\n");
+ amdgpu_device_set_mcbp(adev);
+
+ /*
+ * By default, use the default init level where all blocks are expected
+ * to be initialized. At present, the 'swinit' of blocks must complete
+ * before the need for a different level can be detected.
+ */
+ amdgpu_set_init_level(adev, AMDGPU_INIT_LEVEL_DEFAULT);
/* early init functions */
- r = amdgpu_early_init(adev);
+ r = amdgpu_device_ip_early_init(adev);
if (r)
return r;
- /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
- /* this will fail for cards that aren't VGA class devices, just
- * ignore it */
- vga_client_register(adev->pdev, adev, NULL, amdgpu_vga_set_decode);
-
- if (amdgpu_runtime_pm == 1)
- runtime = true;
- if (amdgpu_device_is_px(ddev))
- runtime = true;
- if (!pci_is_thunderbolt_attached(adev->pdev))
- vga_switcheroo_register_client(adev->pdev,
- &amdgpu_switcheroo_ops, runtime);
- if (runtime)
- vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
+ /*
+ * No need to remove conflicting FBs for non-display class devices.
+ * This prevents the sysfb from being freed accidentally.
+ */
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
+ (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
+ /* Get rid of things like offb */
+ r = aperture_remove_conflicting_pci_devices(adev->pdev, amdgpu_kms_driver.name);
+ if (r)
+ return r;
+ }
- /* Read BIOS */
- if (!amdgpu_get_bios(adev)) {
- r = -EINVAL;
- goto failed;
+ /* Enable TMZ based on IP_VERSION */
+ amdgpu_gmc_tmz_set(adev);
+
+ if (amdgpu_sriov_vf(adev) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
+ /* VF MMIO access (except mailbox range) from CPU
+ * will be blocked during sriov runtime
+ */
+ adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;
+
+ amdgpu_gmc_noretry_set(adev);
+ /* Need to get xgmi info early to decide the reset behavior*/
+ if (adev->gmc.xgmi.supported) {
+ r = adev->gfxhub.funcs->get_xgmi_info(adev);
+ if (r)
+ return r;
}
- r = amdgpu_atombios_init(adev);
- if (r) {
- dev_err(adev->dev, "amdgpu_atombios_init failed\n");
- goto failed;
+ /* enable PCIE atomic ops */
+ if (amdgpu_sriov_vf(adev)) {
+ if (adev->virt.fw_reserve.p_pf2vf)
+ adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
+ adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_support_flags ==
+ (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+ /* APUs with gfx9 onwards don't rely on PCIe atomics; the internal
+ * path natively supports atomics, so set have_atomics_support to true.
+ */
+ } else if ((adev->flags & AMD_IS_APU) &&
+ (amdgpu_ip_version(adev, GC_HWIP, 0) >
+ IP_VERSION(9, 0, 0))) {
+ adev->have_atomics_support = true;
+ } else {
+ adev->have_atomics_support =
+ !pci_enable_atomic_ops_to_root(adev->pdev,
+ PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+ PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+ }
+
+ if (!adev->have_atomics_support)
+ dev_info(adev->dev, "PCIE atomic ops is not supported\n");
+
+ /* doorbell bar mapping and doorbell index init*/
+ amdgpu_doorbell_init(adev);
+
+ if (amdgpu_emu_mode == 1) {
+ /* post the asic on emulation mode */
+ emu_soc_asic_init(adev);
+ goto fence_driver_init;
}
+ amdgpu_reset_init(adev);
+
/* detect if we are with an SRIOV vbios */
- amdgpu_device_detect_sriov_bios(adev);
+ if (adev->bios)
+ amdgpu_device_detect_sriov_bios(adev);
+
+ /* check if we need to reset the asic
+ * E.g., driver was not cleanly unloaded previously, etc.
+ */
+ if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
+ if (adev->gmc.xgmi.num_physical_nodes) {
+ dev_info(adev->dev, "Pending hive reset.\n");
+ amdgpu_set_init_level(adev,
+ AMDGPU_INIT_LEVEL_MINIMAL_XGMI);
+ } else if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+ !amdgpu_device_has_display_hardware(adev)) {
+ r = psp_gpu_reset(adev);
+ } else {
+ tmp = amdgpu_reset_method;
+ /* It should do a default reset when loading or reloading the driver,
+ * regardless of the module parameter reset_method.
+ */
+ amdgpu_reset_method = AMD_RESET_METHOD_NONE;
+ r = amdgpu_asic_reset(adev);
+ amdgpu_reset_method = tmp;
+ }
+
+ if (r) {
+ dev_err(adev->dev, "asic reset on init failed\n");
+ goto failed;
+ }
+ }
/* Post card if necessary */
- if (amdgpu_vpost_needed(adev)) {
+ if (amdgpu_device_need_post(adev)) {
if (!adev->bios) {
dev_err(adev->dev, "no vBIOS found\n");
r = -EINVAL;
goto failed;
}
- DRM_INFO("GPU posting now...\n");
- r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ dev_info(adev->dev, "GPU posting now...\n");
+ r = amdgpu_device_asic_init(adev);
if (r) {
dev_err(adev->dev, "gpu post error!\n");
goto failed;
}
- } else {
- DRM_INFO("GPU post is not needed\n");
}
- if (!adev->is_atom_fw) {
- /* Initialize clocks */
- r = amdgpu_atombios_get_clock_info(adev);
- if (r) {
- dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
- return r;
+ if (adev->bios) {
+ if (adev->is_atom_fw) {
+ /* Initialize clocks */
+ r = amdgpu_atomfirmware_get_clock_info(adev);
+ if (r) {
+ dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+ goto failed;
+ }
+ } else {
+ /* Initialize clocks */
+ r = amdgpu_atombios_get_clock_info(adev);
+ if (r) {
+ dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
+ goto failed;
+ }
+ /* init i2c buses */
+ amdgpu_i2c_init(adev);
}
- /* init i2c buses */
- amdgpu_atombios_i2c_init(adev);
}
+fence_driver_init:
/* Fence driver */
- r = amdgpu_fence_driver_init(adev);
+ r = amdgpu_fence_driver_sw_init(adev);
if (r) {
- dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
+ dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
goto failed;
}
/* init the mode config */
- drm_mode_config_init(adev->ddev);
+ drm_mode_config_init(adev_to_drm(adev));
- r = amdgpu_init(adev);
+ r = amdgpu_device_ip_init(adev);
if (r) {
- dev_err(adev->dev, "amdgpu_init failed\n");
- amdgpu_fini(adev);
- goto failed;
+ dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
+ goto release_ras_con;
}
+ amdgpu_fence_driver_hw_init(adev);
+
+ dev_info(adev->dev,
+ "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
+ adev->gfx.config.max_shader_engines,
+ adev->gfx.config.max_sh_per_se,
+ adev->gfx.config.max_cu_per_sh,
+ adev->gfx.cu_info.number);
+
adev->accel_working = true;
+ amdgpu_vm_check_compute_bug(adev);
+
/* Initialize the buffer migration limit. */
if (amdgpu_moverate >= 0)
max_MBps = amdgpu_moverate;
@@ -1997,342 +4779,656 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* Get a log2 for easy divisions. */
adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
- r = amdgpu_ib_pool_init(adev);
- if (r) {
- dev_err(adev->dev, "IB initialization failed (%d).\n", r);
- goto failed;
+ /*
+ * Register gpu instance before amdgpu_device_enable_mgpu_fan_boost.
+ * Otherwise the mgpu fan boost feature will be skipped because this
+ * gpu instance would not have been counted yet.
+ */
+ amdgpu_register_gpu_instance(adev);
+
+ /* enable clockgating, etc. after ib tests, etc. since some blocks require
+ * explicit gating rather than handling it automatically.
+ */
+ if (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) {
+ r = amdgpu_device_ip_late_init(adev);
+ if (r) {
+ dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
+ goto release_ras_con;
+ }
+ /* must succeed. */
+ amdgpu_ras_resume(adev);
+ queue_delayed_work(system_wq, &adev->delayed_init_work,
+ msecs_to_jiffies(AMDGPU_RESUME_MS));
}
- r = amdgpu_ib_ring_tests(adev);
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_release_full_gpu(adev, true);
+ flush_delayed_work(&adev->delayed_init_work);
+ }
+
+ /*
+ * Register these sysfs interfaces after `late_init`, as some of the
+ * operations performed in `late_init` might affect the creation of
+ * the sysfs interfaces.
+ */
+ r = amdgpu_atombios_sysfs_init(adev);
+ if (r)
+ drm_err(&adev->ddev,
+ "registering atombios sysfs failed (%d).\n", r);
+
+ r = amdgpu_pm_sysfs_init(adev);
if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
+ dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
- amdgpu_fbdev_init(adev);
+ r = amdgpu_ucode_sysfs_init(adev);
+ if (r) {
+ adev->ucode_sysfs_en = false;
+ dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
+ } else
+ adev->ucode_sysfs_en = true;
- r = amdgpu_gem_debugfs_init(adev);
+ r = amdgpu_device_attr_sysfs_init(adev);
if (r)
- DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+ dev_err(adev->dev, "Could not create amdgpu device attr\n");
- r = amdgpu_debugfs_regs_init(adev);
+ r = devm_device_add_group(adev->dev, &amdgpu_board_attrs_group);
if (r)
- DRM_ERROR("registering register debugfs failed (%d).\n", r);
+ dev_err(adev->dev,
+ "Could not create amdgpu board attributes\n");
+
+ amdgpu_fru_sysfs_init(adev);
+ amdgpu_reg_state_sysfs_init(adev);
+ amdgpu_xcp_sysfs_init(adev);
- r = amdgpu_debugfs_firmware_init(adev);
+ if (IS_ENABLED(CONFIG_PERF_EVENTS))
+ r = amdgpu_pmu_init(adev);
if (r)
- DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+ dev_err(adev->dev, "amdgpu_pmu_init failed\n");
- if ((amdgpu_testing & 1)) {
- if (adev->accel_working)
- amdgpu_test_moves(adev);
- else
- DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
- }
- if (amdgpu_benchmarking) {
- if (adev->accel_working)
- amdgpu_benchmark(adev, amdgpu_benchmarking);
- else
- DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
- }
+ /* Have stored pci confspace at hand for restore in sudden PCI error */
+ if (amdgpu_device_cache_pci_state(adev->pdev))
+ pci_restore_state(pdev);
- /* enable clockgating, etc. after ib tests, etc. since some blocks require
- * explicit gating rather than handling it automatically.
+ /* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
+ /* this will fail for cards that aren't VGA class devices, just
+ * ignore it
*/
- r = amdgpu_late_init(adev);
- if (r) {
- dev_err(adev->dev, "amdgpu_late_init failed\n");
+ if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
+
+ px = amdgpu_device_supports_px(adev);
+
+ if (px || (!dev_is_removable(&adev->pdev->dev) &&
+ apple_gmux_detect(NULL, NULL)))
+ vga_switcheroo_register_client(adev->pdev,
+ &amdgpu_switcheroo_ops, px);
+
+ if (px)
+ vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
+
+ if (adev->init_lvl->level == AMDGPU_INIT_LEVEL_MINIMAL_XGMI)
+ amdgpu_xgmi_reset_on_init(adev);
+
+ amdgpu_device_check_iommu_direct_map(adev);
+
+ adev->pm_nb.notifier_call = amdgpu_device_pm_notifier;
+ r = register_pm_notifier(&adev->pm_nb);
+ if (r)
goto failed;
- }
return 0;
+release_ras_con:
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, true);
+
+ /* failed in exclusive mode due to timeout */
+ if (amdgpu_sriov_vf(adev) &&
+ !amdgpu_sriov_runtime(adev) &&
+ amdgpu_virt_mmio_blocked(adev) &&
+ !amdgpu_virt_wait_reset(adev)) {
+ dev_err(adev->dev, "VF exclusive mode timeout\n");
+ /* Don't send request since VF is inactive. */
+ adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+ adev->virt.ops = NULL;
+ r = -EAGAIN;
+ }
+ amdgpu_release_ras_context(adev);
+
failed:
- if (runtime)
- vga_switcheroo_fini_domain_pm_ops(adev->dev);
+ amdgpu_vf_error_trans_all(adev);
+
return r;
}
+static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
+{
+
+ /* Clear all CPU mappings pointing to this device */
+ unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
+
+ /* Unmap all mapped bars - Doorbell, registers and VRAM */
+ amdgpu_doorbell_fini(adev);
+
+ iounmap(adev->rmmio);
+ adev->rmmio = NULL;
+ if (adev->mman.aper_base_kaddr)
+ iounmap(adev->mman.aper_base_kaddr);
+ adev->mman.aper_base_kaddr = NULL;
+
+ /* Memory manager related */
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
+ arch_phys_wc_del(adev->gmc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+ }
+}
+
/**
- * amdgpu_device_fini - tear down the driver
+ * amdgpu_device_fini_hw - tear down the driver
*
* @adev: amdgpu_device pointer
*
* Tear down the driver info (all asics).
* Called at driver shutdown.
*/
-void amdgpu_device_fini(struct amdgpu_device *adev)
+void amdgpu_device_fini_hw(struct amdgpu_device *adev)
{
- int r;
+ dev_info(adev->dev, "amdgpu: finishing device.\n");
+ flush_delayed_work(&adev->delayed_init_work);
- DRM_INFO("amdgpu: finishing device.\n");
+ if (adev->mman.initialized)
+ drain_workqueue(adev->mman.bdev.wq);
adev->shutdown = true;
- if (adev->mode_info.mode_config_initialized)
- drm_crtc_force_disable_all(adev->ddev);
- /* evict vram memory */
- amdgpu_bo_evict_vram(adev);
- amdgpu_ib_pool_fini(adev);
- amdgpu_fence_driver_fini(adev);
- amdgpu_fbdev_fini(adev);
- r = amdgpu_fini(adev);
+
+ unregister_pm_notifier(&adev->pm_nb);
+
+ /* make sure IB test finished before entering exclusive mode
+ * to avoid preemption on IB test
+ */
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_request_full_gpu(adev, false);
+ amdgpu_virt_fini_data_exchange(adev);
+ }
+
+ /* disable all interrupts */
+ amdgpu_irq_disable_all(adev);
+ if (adev->mode_info.mode_config_initialized) {
+ if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
+ drm_helper_force_disable_all(adev_to_drm(adev));
+ else
+ drm_atomic_helper_shutdown(adev_to_drm(adev));
+ }
+ amdgpu_fence_driver_hw_fini(adev);
+
+ if (adev->pm.sysfs_initialized)
+ amdgpu_pm_sysfs_fini(adev);
+ if (adev->ucode_sysfs_en)
+ amdgpu_ucode_sysfs_fini(adev);
+ amdgpu_device_attr_sysfs_fini(adev);
+ amdgpu_fru_sysfs_fini(adev);
+
+ amdgpu_reg_state_sysfs_fini(adev);
+ amdgpu_xcp_sysfs_fini(adev);
+
+ /* disable ras feature must before hw fini */
+ amdgpu_ras_pre_fini(adev);
+
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
+ amdgpu_device_ip_fini_early(adev);
+
+ amdgpu_irq_fini_hw(adev);
+
+ if (adev->mman.initialized)
+ ttm_device_clear_dma_mappings(&adev->mman.bdev);
+
+ amdgpu_gart_dummy_page_fini(adev);
+
+ if (drm_dev_is_unplugged(adev_to_drm(adev)))
+ amdgpu_device_unmap_mmio(adev);
+
+}
+
+void amdgpu_device_fini_sw(struct amdgpu_device *adev)
+{
+ int i, idx;
+ bool px;
+
+ amdgpu_device_ip_fini(adev);
+ amdgpu_fence_driver_sw_fini(adev);
+ amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
adev->accel_working = false;
+ dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
+ for (i = 0; i < MAX_XCP; ++i) {
+ dma_fence_put(adev->isolation[i].spearhead);
+ amdgpu_sync_free(&adev->isolation[i].active);
+ amdgpu_sync_free(&adev->isolation[i].prev);
+ }
+
+ amdgpu_reset_fini(adev);
+
/* free i2c buses */
amdgpu_i2c_fini(adev);
- amdgpu_atombios_fini(adev);
- kfree(adev->bios);
- adev->bios = NULL;
- if (!pci_is_thunderbolt_attached(adev->pdev))
+
+ if (adev->bios) {
+ if (amdgpu_emu_mode != 1)
+ amdgpu_atombios_fini(adev);
+ amdgpu_bios_release(adev);
+ }
+
+ kfree(adev->fru_info);
+ adev->fru_info = NULL;
+
+ kfree(adev->xcp_mgr);
+ adev->xcp_mgr = NULL;
+
+ px = amdgpu_device_supports_px(adev);
+
+ if (px || (!dev_is_removable(&adev->pdev->dev) &&
+ apple_gmux_detect(NULL, NULL)))
vga_switcheroo_unregister_client(adev->pdev);
- if (adev->flags & AMD_IS_PX)
+
+ if (px)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
- vga_client_register(adev->pdev, NULL, NULL, NULL);
- if (adev->rio_mem)
- pci_iounmap(adev->pdev, adev->rio_mem);
- adev->rio_mem = NULL;
- iounmap(adev->rmmio);
- adev->rmmio = NULL;
- if (adev->asic_type >= CHIP_BONAIRE)
- amdgpu_doorbell_fini(adev);
- amdgpu_debugfs_regs_cleanup(adev);
+
+ if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ vga_client_unregister(adev->pdev);
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+
+ iounmap(adev->rmmio);
+ adev->rmmio = NULL;
+ drm_dev_exit(idx);
+ }
+
+ if (IS_ENABLED(CONFIG_PERF_EVENTS))
+ amdgpu_pmu_fini(adev);
+ if (adev->mman.discovery_bin)
+ amdgpu_discovery_fini(adev);
+
+ amdgpu_reset_put_reset_domain(adev->reset_domain);
+ adev->reset_domain = NULL;
+
+ kfree(adev->pci_state);
+ kfree(adev->pcie_reset_ctx.swds_pcistate);
+ kfree(adev->pcie_reset_ctx.swus_pcistate);
}
+/**
+ * amdgpu_device_evict_resources - evict device resources
+ * @adev: amdgpu device object
+ *
+ * Evicts all ttm device resources (vram BOs, gart table) from the lru list
+ * of the vram memory type. Mainly used for evicting device resources
+ * at suspend time.
+ *
+ */
+static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
+{
+ int ret;
+
+ /* No need to evict vram on APUs unless going to S4 */
+ if (!adev->in_s4 && (adev->flags & AMD_IS_APU))
+ return 0;
+
+ /* No need to evict when going to S5 through S4 callbacks */
+ if (system_state == SYSTEM_POWER_OFF)
+ return 0;
+
+ ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
+ if (ret) {
+ dev_warn(adev->dev, "evicting device resources failed\n");
+ return ret;
+ }
+
+ if (adev->in_s4) {
+ ret = ttm_device_prepare_hibernation(&adev->mman.bdev);
+ if (ret)
+ dev_err(adev->dev, "prepare hibernation failed, %d\n", ret);
+ }
+ return ret;
+}
/*
* Suspend & resume.
*/
/**
- * amdgpu_device_suspend - initiate device suspend
- *
- * @pdev: drm dev pointer
- * @state: suspend state
+ * amdgpu_device_pm_notifier - Notification block for Suspend/Hibernate events
+ * @nb: notifier block
+ * @mode: suspend mode
+ * @data: data
*
- * Puts the hw in the suspend state (all asics).
- * Returns 0 for success or an error on failure.
- * Called at driver suspend.
+ * This function is called when the system is about to suspend or hibernate.
+ * It is used to set the appropriate flags so that eviction can be optimized
+ * in the pm prepare callback.
*/
-int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
+static int amdgpu_device_pm_notifier(struct notifier_block *nb, unsigned long mode,
+ void *data)
{
- struct amdgpu_device *adev;
- struct drm_crtc *crtc;
- struct drm_connector *connector;
- int r;
+ struct amdgpu_device *adev = container_of(nb, struct amdgpu_device, pm_nb);
- if (dev == NULL || dev->dev_private == NULL) {
- return -ENODEV;
+ switch (mode) {
+ case PM_HIBERNATION_PREPARE:
+ adev->in_s4 = true;
+ break;
+ case PM_POST_HIBERNATION:
+ adev->in_s4 = false;
+ break;
}
- adev = dev->dev_private;
+ return NOTIFY_DONE;
+}
+
+/**
+ * amdgpu_device_prepare - prepare for device suspend
+ *
+ * @dev: drm dev pointer
+ *
+ * Prepare to put the hw in the suspend state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver suspend.
+ */
+int amdgpu_device_prepare(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i, r;
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- drm_kms_helper_poll_disable(dev);
+ /* Evict the majority of BOs before starting suspend sequence */
+ r = amdgpu_device_evict_resources(adev);
+ if (r)
+ return r;
+
+ flush_delayed_work(&adev->gfx.gfx_off_delay_work);
- /* turn off display hw */
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ if (!adev->ip_blocks[i].version->funcs->prepare_suspend)
+ continue;
+ r = adev->ip_blocks[i].version->funcs->prepare_suspend(&adev->ip_blocks[i]);
+ if (r)
+ return r;
}
- drm_modeset_unlock_all(dev);
- /* unpin the front buffers and cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
- struct amdgpu_bo *robj;
+ return 0;
+}
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, true);
- if (r == 0) {
- amdgpu_bo_unpin(aobj);
- amdgpu_bo_unreserve(aobj);
- }
- }
+/**
+ * amdgpu_device_complete - complete power state transition
+ *
+ * @dev: drm dev pointer
+ *
+ * Undo the changes from amdgpu_device_prepare. This will be
+ * called on all resume transitions, including those that failed.
+ */
+void amdgpu_device_complete(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int i;
- if (rfb == NULL || rfb->obj == NULL) {
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
continue;
- }
- robj = gem_to_amdgpu_bo(rfb->obj);
- /* don't unpin kernel fb objects */
- if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
- r = amdgpu_bo_reserve(robj, true);
- if (r == 0) {
- amdgpu_bo_unpin(robj);
- amdgpu_bo_unreserve(robj);
- }
- }
+ if (!adev->ip_blocks[i].version->funcs->complete)
+ continue;
+ adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]);
}
- /* evict vram memory */
- amdgpu_bo_evict_vram(adev);
+}
- amdgpu_fence_driver_suspend(adev);
+/**
+ * amdgpu_device_suspend - initiate device suspend
+ *
+ * @dev: drm dev pointer
+ * @notify_clients: notify in-kernel DRM clients
+ *
+ * Puts the hw in the suspend state (all asics).
+ * Returns 0 for success or an error on failure.
+ * Called at driver suspend.
+ */
+int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int r = 0;
- r = amdgpu_suspend(adev);
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
- /* evict remaining vram memory
- * This second call to evict vram is to evict the gart page table
- * using the CPU.
- */
- amdgpu_bo_evict_vram(adev);
+ adev->in_suspend = true;
- if (adev->is_atom_fw)
- amdgpu_atomfirmware_scratch_regs_save(adev);
- else
- amdgpu_atombios_scratch_regs_save(adev);
- pci_save_state(dev->pdev);
- if (suspend) {
- /* Shut down the device */
- pci_disable_device(dev->pdev);
- pci_set_power_state(dev->pdev, PCI_D3hot);
- } else {
- r = amdgpu_asic_reset(adev);
+ if (amdgpu_sriov_vf(adev)) {
+ if (!adev->in_runpm)
+ amdgpu_amdkfd_suspend_process(adev);
+ amdgpu_virt_fini_data_exchange(adev);
+ r = amdgpu_virt_request_full_gpu(adev, false);
if (r)
- DRM_ERROR("amdgpu asic reset failed\n");
+ return r;
}
- if (fbcon) {
- console_lock();
- amdgpu_fbdev_set_suspend(adev, 1);
- console_unlock();
- }
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3))
+ dev_warn(adev->dev, "smart shift update failed\n");
+
+ if (notify_clients)
+ drm_client_dev_suspend(adev_to_drm(adev), false);
+
+ cancel_delayed_work_sync(&adev->delayed_init_work);
+
+ amdgpu_ras_suspend(adev);
+
+ amdgpu_device_ip_suspend_phase1(adev);
+
+ amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ amdgpu_userq_suspend(adev);
+
+ r = amdgpu_device_evict_resources(adev);
+ if (r)
+ return r;
+
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+
+ amdgpu_fence_driver_hw_fini(adev);
+
+ amdgpu_device_ip_suspend_phase2(adev);
+
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, false);
+
+ r = amdgpu_dpm_notify_rlc_state(adev, false);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static inline int amdgpu_virt_resume(struct amdgpu_device *adev)
+{
+ int r;
+ unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id;
+
+ /* During VM resume, QEMU programming of VF MSIX table (register GFXMSIX_VECT0_ADDR_LO)
+ * may not work. The access could be blocked by nBIF protection as VF isn't in
+ * exclusive access mode. Exclusive access is enabled now; disable and re-enable
+ * MSIX so that QEMU reprograms the MSIX table.
+ */
+ amdgpu_restore_msix(adev);
+
+ r = adev->gfxhub.funcs->get_xgmi_info(adev);
+ if (r)
+ return r;
+
+ dev_info(adev->dev, "xgmi node, old id %d, new id %d\n",
+ prev_physical_node_id, adev->gmc.xgmi.physical_node_id);
+
+ adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
+ adev->vm_manager.vram_base_offset +=
+ adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+
return 0;
}
/**
* amdgpu_device_resume - initiate device resume
*
- * @pdev: drm dev pointer
+ * @dev: drm dev pointer
+ * @notify_clients: notify in-kernel DRM clients
*
* Bring the hw back to operating state (all asics).
* Returns 0 for success or an error on failure.
* Called at driver resume.
*/
-int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
+int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
{
- struct drm_connector *connector;
- struct amdgpu_device *adev = dev->dev_private;
- struct drm_crtc *crtc;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int r = 0;
- if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
- return 0;
-
- if (fbcon)
- console_lock();
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ if (r)
+ return r;
+ }
- if (resume) {
- pci_set_power_state(dev->pdev, PCI_D0);
- pci_restore_state(dev->pdev);
- r = pci_enable_device(dev->pdev);
+ if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
+ r = amdgpu_virt_resume(adev);
if (r)
- goto unlock;
+ goto exit;
}
- if (adev->is_atom_fw)
- amdgpu_atomfirmware_scratch_regs_restore(adev);
- else
- amdgpu_atombios_scratch_regs_restore(adev);
+
+ if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ if (adev->in_s0ix)
+ amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry);
/* post card */
- if (amdgpu_need_post(adev)) {
- r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (amdgpu_device_need_post(adev)) {
+ r = amdgpu_device_asic_init(adev);
if (r)
- DRM_ERROR("amdgpu asic init failed\n");
+ dev_err(adev->dev, "amdgpu asic init failed\n");
}
- r = amdgpu_resume(adev);
+ r = amdgpu_device_ip_resume(adev);
+
if (r) {
- DRM_ERROR("amdgpu_resume failed (%d).\n", r);
- goto unlock;
+ dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
+ goto exit;
}
- amdgpu_fence_driver_resume(adev);
- if (resume) {
- r = amdgpu_ib_ring_tests(adev);
- if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
- }
+ r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
+ if (r)
+ goto exit;
- r = amdgpu_late_init(adev);
+ r = amdgpu_userq_resume(adev);
if (r)
- goto unlock;
-
- /* pin cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, true);
- if (r == 0) {
- r = amdgpu_bo_pin(aobj,
- AMDGPU_GEM_DOMAIN_VRAM,
- &amdgpu_crtc->cursor_addr);
- if (r != 0)
- DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
- amdgpu_bo_unreserve(aobj);
- }
- }
- }
+ goto exit;
- /* blat the mode back in */
- if (fbcon) {
- drm_helper_resume_force_mode(dev);
- /* turn on display hw */
- drm_modeset_lock_all(dev);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- }
- drm_modeset_unlock_all(dev);
+ r = amdgpu_device_ip_late_init(adev);
+ if (r)
+ goto exit;
+
+ queue_delayed_work(system_wq, &adev->delayed_init_work,
+ msecs_to_jiffies(AMDGPU_RESUME_MS));
+exit:
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_data_exchange(adev);
+ amdgpu_virt_release_full_gpu(adev, true);
+
+ if (!r && !adev->in_runpm)
+ r = amdgpu_amdkfd_resume_process(adev);
}
- drm_kms_helper_poll_enable(dev);
+ if (r)
+ return r;
- /*
- * Most of the connector probing functions try to acquire runtime pm
- * refs to ensure that the GPU is powered on when connector polling is
- * performed. Since we're calling this from a runtime PM callback,
- * trying to acquire rpm refs will cause us to deadlock.
- *
- * Since we're guaranteed to be holding the rpm lock, it's safe to
- * temporarily disable the rpm helpers so this doesn't deadlock us.
- */
+ /* Make sure IB tests flushed */
+ flush_delayed_work(&adev->delayed_init_work);
+
+ if (notify_clients)
+ drm_client_dev_resume(adev_to_drm(adev), false);
+
+ amdgpu_ras_resume(adev);
+
+ if (adev->mode_info.num_crtc) {
+ /*
+ * Most of the connector probing functions try to acquire runtime pm
+ * refs to ensure that the GPU is powered on when connector polling is
+ * performed. Since we're calling this from a runtime PM callback,
+ * trying to acquire rpm refs will cause us to deadlock.
+ *
+ * Since we're guaranteed to be holding the rpm lock, it's safe to
+ * temporarily disable the rpm helpers so this doesn't deadlock us.
+ */
#ifdef CONFIG_PM
- dev->dev->power.disable_depth++;
+ dev->dev->power.disable_depth++;
#endif
- drm_helper_hpd_irq_event(dev);
+ if (!adev->dc_enabled)
+ drm_helper_hpd_irq_event(dev);
+ else
+ drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
- dev->dev->power.disable_depth--;
+ dev->dev->power.disable_depth--;
#endif
+ }
- if (fbcon)
- amdgpu_fbdev_set_suspend(adev, 0);
+ amdgpu_vram_mgr_clear_reset_blocks(adev);
+ adev->in_suspend = false;
-unlock:
- if (fbcon)
- console_unlock();
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0))
+ dev_warn(adev->dev, "smart shift update failed\n");
- return r;
+ return 0;
}
-static bool amdgpu_check_soft_reset(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_check_soft_reset - did soft reset succeed
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * The list of all the hardware IPs that make up the asic is walked and
+ * the check_soft_reset callbacks are run. check_soft_reset determines
+ * if the asic is still hung or not.
+ * Returns true if any of the IPs are still in a hung state, false if not.
+ */
+static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
{
int i;
bool asic_hang = false;
+ if (amdgpu_sriov_vf(adev))
+ return true;
+
+ if (amdgpu_asic_need_full_reset(adev))
+ return true;
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
if (adev->ip_blocks[i].version->funcs->check_soft_reset)
adev->ip_blocks[i].status.hang =
- adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
+ adev->ip_blocks[i].version->funcs->check_soft_reset(
+ &adev->ip_blocks[i]);
if (adev->ip_blocks[i].status.hang) {
- DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
+ dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
asic_hang = true;
}
}
return asic_hang;
}
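
For reference, the per-IP-block callback walk used by the soft-reset helpers above (and below) always follows the same pattern: iterate adev->ip_blocks, skip invalid blocks, and invoke an optional per-block callback, which this patch changes to take the IP block itself rather than adev. A minimal sketch of that pattern, assuming only the fields visible in this diff; the helper itself is illustrative and not part of the patch:

	/*
	 * Illustrative only -- not part of the patch. Generic form of the
	 * per-IP-block callback walk used by the soft-reset helpers; the
	 * field names come from this diff, the helper name is made up.
	 */
	static int example_ip_for_each(struct amdgpu_device *adev,
				       int (*cb)(struct amdgpu_ip_block *ip_block))
	{
		int i, r;

		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (!adev->ip_blocks[i].status.valid)
				continue;
			r = cb(&adev->ip_blocks[i]);
			if (r)
				return r;
		}
		return 0;
	}
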
-static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * The list of all the hardware IPs that make up the asic is walked and the
+ * pre_soft_reset callbacks are run if the block is hung. pre_soft_reset
+ * handles any IP specific hardware or software state changes that are
+ * necessary for a soft reset to succeed.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2341,7 +5437,7 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hang &&
adev->ip_blocks[i].version->funcs->pre_soft_reset) {
- r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
+ r = adev->ip_blocks[i].version->funcs->pre_soft_reset(&adev->ip_blocks[i]);
if (r)
return r;
}
@@ -2350,19 +5446,32 @@ static int amdgpu_pre_soft_reset(struct amdgpu_device *adev)
return 0;
}
-static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Some hardware IPs cannot be soft reset. If they are hung, a full gpu
+ * reset is necessary to recover.
+ * Returns true if a full asic reset is required, false if not.
+ */
+static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
int i;
+ if (amdgpu_asic_need_full_reset(adev))
+ return true;
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
- (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
if (adev->ip_blocks[i].status.hang) {
- DRM_INFO("Some block need full reset!\n");
+ dev_info(adev->dev, "Some block need full reset!\n");
return true;
}
}
@@ -2370,7 +5479,18 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
return false;
}
-static int amdgpu_soft_reset(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_soft_reset - do a soft reset
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * The list of all the hardware IPs that make up the asic is walked and the
+ * soft_reset callbacks are run if the block is hung. soft_reset handles any
+ * IP specific hardware or software state changes that are necessary to soft
+ * reset the IP.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2379,7 +5499,7 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hang &&
adev->ip_blocks[i].version->funcs->soft_reset) {
- r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
+ r = adev->ip_blocks[i].version->funcs->soft_reset(&adev->ip_blocks[i]);
if (r)
return r;
}
@@ -2388,7 +5508,18 @@ static int amdgpu_soft_reset(struct amdgpu_device *adev)
return 0;
}
-static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_ip_post_soft_reset - clean up from soft reset
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * The list of all the hardware IPs that make up the asic is walked and the
+ * post_soft_reset callbacks are run if the asic was hung. post_soft_reset
+ * handles any IP specific hardware or software state changes that are
+ * necessary after the IP has been soft reset.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
{
int i, r = 0;
@@ -2397,7 +5528,7 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
continue;
if (adev->ip_blocks[i].status.hang &&
adev->ip_blocks[i].version->funcs->post_soft_reset)
- r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
+ r = adev->ip_blocks[i].version->funcs->post_soft_reset(&adev->ip_blocks[i]);
if (r)
return r;
}
@@ -2405,325 +5536,1137 @@ static int amdgpu_post_soft_reset(struct amdgpu_device *adev)
return 0;
}
-bool amdgpu_need_backup(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
+ *
+ * @adev: amdgpu_device pointer
+ * @reset_context: amdgpu reset context pointer
+ *
+ * Do a VF FLR and reinitialize the ASIC.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context)
{
- if (adev->flags & AMD_IS_APU)
- return false;
+ int r;
+ struct amdgpu_hive_info *hive = NULL;
- return amdgpu_lockup_timeout > 0 ? true : false;
-}
+ if (test_bit(AMDGPU_HOST_FLR, &reset_context->flags)) {
+ if (!amdgpu_ras_get_fed_status(adev))
+ amdgpu_virt_ready_to_reset(adev);
+ amdgpu_virt_wait_reset(adev);
+ clear_bit(AMDGPU_HOST_FLR, &reset_context->flags);
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ } else {
+ r = amdgpu_virt_reset_gpu(adev);
+ }
+ if (r)
+ return r;
-static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct dma_fence **fence)
-{
- uint32_t domain;
- int r;
+ amdgpu_ras_clear_err_state(adev);
+ amdgpu_irq_gpu_reset_resume_helper(adev);
- if (!bo->shadow)
- return 0;
+	/* some SW cleanup the VF needs to do before recovery */
+ amdgpu_virt_post_reset(adev);
- r = amdgpu_bo_reserve(bo, true);
+ /* Resume IP prior to SMC */
+ r = amdgpu_device_ip_reinit_early_sriov(adev);
if (r)
return r;
- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
- /* if bo has been evicted, then no need to recover */
- if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- r = amdgpu_bo_validate(bo->shadow);
- if (r) {
- DRM_ERROR("bo validate failed!\n");
- goto err;
- }
- r = amdgpu_ttm_bind(&bo->shadow->tbo, &bo->shadow->tbo.mem);
- if (r) {
- DRM_ERROR("%p bind failed\n", bo->shadow);
- goto err;
- }
+ amdgpu_virt_init_data_exchange(adev);
- r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
- NULL, fence, true);
- if (r) {
- DRM_ERROR("recover page table failed!\n");
- goto err;
- }
+ r = amdgpu_device_fw_loading(adev);
+ if (r)
+ return r;
+
+ /* now we are okay to resume SMC/CP/SDMA */
+ r = amdgpu_device_ip_reinit_late_sriov(adev);
+ if (r)
+ return r;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ /* Update PSP FW topology after reset */
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(hive, adev);
+ if (hive)
+ amdgpu_put_xgmi_hive(hive);
+ if (r)
+ return r;
+
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ return r;
+
+ if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST)
+ amdgpu_inc_vram_lost(adev);
+
+	/* This needs to be called while we still have full GPU access, so we
+	 * can't defer it the way bare-metal does.
+	 */
+ amdgpu_amdkfd_post_reset(adev);
+ amdgpu_virt_release_full_gpu(adev, true);
+
+	/* Aldebaran and gfx_11_0_3 support RAS in SRIOV, so RAS needs to be resumed during reset */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3))
+ amdgpu_ras_resume(adev);
+
+ amdgpu_virt_ras_telemetry_post_reset(adev);
+
+ return 0;
+}
+
+/**
+ * amdgpu_device_has_job_running - check if there is any unfinished job
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Check whether any job is still running on the device when the guest driver
+ * receives an FLR notification from the host driver. If jobs are still
+ * running, the guest driver does not respond to the FLR immediately; instead,
+ * it lets the job hit its timeout and then issues the reset request.
+ */
+bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!amdgpu_ring_sched_ready(ring))
+ continue;
+
+ if (amdgpu_fence_count_emitted(ring))
+ return true;
}
-err:
- amdgpu_bo_unreserve(bo);
- return r;
+ return false;
}
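
amdgpu_device_has_job_running() is exported for the virtualization FLR path described in its kernel-doc. A hypothetical guest-side caller might gate its FLR response on it roughly as sketched below; everything here other than amdgpu_device_has_job_running() itself is illustrative, and example_ack_flr() does not exist in the driver:

	/* Hypothetical guest-side FLR notification handler (sketch only). */
	static void example_handle_flr_notification(struct amdgpu_device *adev)
	{
		/*
		 * Per the kernel-doc above: if jobs are still in flight, do not
		 * respond to the FLR; let the job time out and drive recovery.
		 */
		if (amdgpu_device_has_job_running(adev))
			return;

		/* example_ack_flr() only marks where the guest would acknowledge */
		example_ack_flr(adev);
	}
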
/**
- * amdgpu_sriov_gpu_reset - reset the asic
+ * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
*
- * @adev: amdgpu device pointer
- * @voluntary: if this reset is requested by guest.
- * (true means by guest and false means by HYPERVISOR )
+ * @adev: amdgpu_device pointer
*
- * Attempt the reset the GPU if it has hung (all asics).
- * for SRIOV case.
- * Returns 0 for success or an error on failure.
+ * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
+ * a hung GPU.
*/
-int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary)
+bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
+{
+
+ if (amdgpu_gpu_recovery == 0)
+ goto disabled;
+
+ /* Skip soft reset check in fatal error mode */
+ if (!amdgpu_ras_is_poison_mode_supported(adev))
+ return true;
+
+ if (amdgpu_sriov_vf(adev))
+ return true;
+
+ if (amdgpu_gpu_recovery == -1) {
+ switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_VERDE:
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+#endif
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_CYAN_SKILLFISH:
+ goto disabled;
+ default:
+ break;
+ }
+ }
+
+ return true;
+
+disabled:
+ dev_info(adev->dev, "GPU recovery disabled.\n");
+ return false;
+}
+
+int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
+{
+ u32 i;
+ int ret = 0;
+
+ if (adev->bios)
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+ dev_info(adev->dev, "GPU mode1 reset\n");
+
+ /* Cache the state before bus master disable. The saved config space
+ * values are used in other cases like restore after mode-2 reset.
+ */
+ amdgpu_device_cache_pci_state(adev->pdev);
+
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+
+ if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
+ dev_info(adev->dev, "GPU smu mode1 reset\n");
+ ret = amdgpu_dpm_mode1_reset(adev);
+ } else {
+ dev_info(adev->dev, "GPU psp mode1 reset\n");
+ ret = psp_gpu_reset(adev);
+ }
+
+ if (ret)
+ goto mode1_reset_failed;
+
+ amdgpu_device_load_pci_state(adev->pdev);
+ ret = amdgpu_psp_wait_for_bootloader(adev);
+ if (ret)
+ goto mode1_reset_failed;
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
+
+ if (memsize != 0xffffffff)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout) {
+ ret = -ETIMEDOUT;
+ goto mode1_reset_failed;
+ }
+
+ if (adev->bios)
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
+ return 0;
+
+mode1_reset_failed:
+ dev_err(adev->dev, "GPU mode1 reset failed\n");
+ return ret;
+}
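
The "wait for asic to come out of reset" loop above is the driver's usual register-poll-with-timeout idiom. As a sketch only (not part of the patch), the same idiom factored into a helper; read_fn stands in for the nbio get_memsize callback used above:

	/* Illustrative poll-until-valid helper; not part of the patch. */
	static int example_poll_until_valid(struct amdgpu_device *adev,
					    u32 (*read_fn)(struct amdgpu_device *adev))
	{
		u32 i;

		for (i = 0; i < adev->usec_timeout; i++) {
			if (read_fn(adev) != 0xffffffff)
				return 0;
			udelay(1);
		}
		return -ETIMEDOUT;
	}
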
+
+int amdgpu_device_link_reset(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ dev_info(adev->dev, "GPU link reset\n");
+
+ if (!amdgpu_reset_in_dpc(adev))
+ ret = amdgpu_dpm_link_reset(adev);
+
+ if (ret)
+ goto link_reset_failed;
+
+ ret = amdgpu_psp_wait_for_bootloader(adev);
+ if (ret)
+ goto link_reset_failed;
+
+ return 0;
+
+link_reset_failed:
+ dev_err(adev->dev, "GPU link reset failed\n");
+ return ret;
+}
+
+int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context)
{
int i, r = 0;
- int resched;
- struct amdgpu_bo *bo, *tmp;
- struct amdgpu_ring *ring;
- struct dma_fence *fence = NULL, *next = NULL;
+ struct amdgpu_job *job = NULL;
+ struct amdgpu_device *tmp_adev = reset_context->reset_req_dev;
+ bool need_full_reset =
+ test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
- mutex_lock(&adev->virt.lock_reset);
- atomic_inc(&adev->gpu_reset_counter);
- adev->gfx.in_reset = true;
+ if (reset_context->reset_req_dev == adev)
+ job = reset_context->job;
- /* block TTM */
- resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_pre_reset(adev);
+
+ amdgpu_fence_driver_isr_toggle(adev, true);
- /* block scheduler */
+ /* block all schedulers and reset given job's ring */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- ring = adev->rings[i];
+ struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!amdgpu_ring_sched_ready(ring))
continue;
- kthread_park(ring->sched.thread);
- amd_sched_hw_job_reset(&ring->sched);
+		/* Clear the job fences from the fence driver so that force_completion
+		 * does not leave NULL and VM flush fences behind in the fence driver.
+		 */
+ amdgpu_fence_driver_clear_job_fences(ring);
+
+ /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+ amdgpu_fence_driver_force_completion(ring);
}
- /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
- amdgpu_fence_driver_force_completion(adev);
+ amdgpu_fence_driver_isr_toggle(adev, false);
+
+ if (job && job->vm)
+ drm_sched_increase_karma(&job->base);
- /* request to take full control of GPU before re-initialization */
- if (voluntary)
- amdgpu_virt_reset_gpu(adev);
+ r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
+ /* If reset handler not implemented, continue; otherwise return */
+ if (r == -EOPNOTSUPP)
+ r = 0;
else
- amdgpu_virt_request_full_gpu(adev, true);
+ return r;
+ /* Don't suspend on bare metal if we are not going to HW reset the ASIC */
+ if (!amdgpu_sriov_vf(adev)) {
- /* Resume IP prior to SMC */
- amdgpu_sriov_reinit_early(adev);
+ if (!need_full_reset)
+ need_full_reset = amdgpu_device_ip_need_full_reset(adev);
- /* we need recover gart prior to run SMC/CP/SDMA resume */
- amdgpu_ttm_recover_gart(adev);
+ if (!need_full_reset && amdgpu_gpu_recovery &&
+ amdgpu_device_ip_check_soft_reset(adev)) {
+ amdgpu_device_ip_pre_soft_reset(adev);
+ r = amdgpu_device_ip_soft_reset(adev);
+ amdgpu_device_ip_post_soft_reset(adev);
+ if (r || amdgpu_device_ip_check_soft_reset(adev)) {
+ dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
+ need_full_reset = true;
+ }
+ }
- /* now we are okay to resume SMC/CP/SDMA */
- amdgpu_sriov_reinit_late(adev);
+ if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags)) {
+ dev_info(tmp_adev->dev, "Dumping IP State\n");
+ /* Trigger ip dump before we reset the asic */
+ for (i = 0; i < tmp_adev->num_ip_blocks; i++)
+ if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
+ tmp_adev->ip_blocks[i].version->funcs
+ ->dump_ip_state((void *)&tmp_adev->ip_blocks[i]);
+ dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
+ }
- amdgpu_irq_gpu_reset_resume_helper(adev);
+ if (need_full_reset)
+ r = amdgpu_device_ip_suspend(adev);
+ if (need_full_reset)
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+ else
+ clear_bit(AMDGPU_NEED_FULL_RESET,
+ &reset_context->flags);
+ }
- if (amdgpu_ib_ring_tests(adev))
- dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);
+ return r;
+}
- /* release full control of GPU after ib test */
- amdgpu_virt_release_full_gpu(adev, true);
+int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
+{
+ struct list_head *device_list_handle;
+ bool full_reset, vram_lost = false;
+ struct amdgpu_device *tmp_adev;
+ int r, init_level;
+
+ device_list_handle = reset_context->reset_device_list;
+
+ if (!device_list_handle)
+ return -EINVAL;
- DRM_INFO("recover vram bo from shadow\n");
+ full_reset = test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
- ring = adev->mman.buffer_funcs_ring;
- mutex_lock(&adev->shadow_list_lock);
- list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
- next = NULL;
- amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
- if (fence) {
- r = dma_fence_wait(fence, false);
+	/**
+	 * If this is a reset on init, use the default init level; otherwise
+	 * keep the level at reset-recovery level.
+	 */
+ if (reset_context->method == AMD_RESET_METHOD_ON_INIT)
+ init_level = AMDGPU_INIT_LEVEL_DEFAULT;
+ else
+ init_level = AMDGPU_INIT_LEVEL_RESET_RECOVERY;
+
+ r = 0;
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ amdgpu_set_init_level(tmp_adev, init_level);
+ if (full_reset) {
+ /* post card */
+ amdgpu_reset_set_dpc_status(tmp_adev, false);
+ amdgpu_ras_clear_err_state(tmp_adev);
+ r = amdgpu_device_asic_init(tmp_adev);
if (r) {
- WARN(r, "recovery from shadow isn't completed\n");
- break;
+ dev_warn(tmp_adev->dev, "asic atom init failed!");
+ } else {
+ dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
+
+ r = amdgpu_device_ip_resume_phase1(tmp_adev);
+ if (r)
+ goto out;
+
+ vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
+
+ if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
+ amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
+
+ if (vram_lost) {
+ dev_info(
+ tmp_adev->dev,
+ "VRAM is lost due to GPU reset!\n");
+ amdgpu_inc_vram_lost(tmp_adev);
+ }
+
+ r = amdgpu_device_fw_loading(tmp_adev);
+ if (r)
+ return r;
+
+ r = amdgpu_xcp_restore_partition_mode(
+ tmp_adev->xcp_mgr);
+ if (r)
+ goto out;
+
+ r = amdgpu_device_ip_resume_phase2(tmp_adev);
+ if (r)
+ goto out;
+
+ if (tmp_adev->mman.buffer_funcs_ring->sched.ready)
+ amdgpu_ttm_set_buffer_funcs_status(tmp_adev, true);
+
+ r = amdgpu_device_ip_resume_phase3(tmp_adev);
+ if (r)
+ goto out;
+
+ if (vram_lost)
+ amdgpu_device_fill_reset_magic(tmp_adev);
+
+				/*
+				 * Add this ASIC back as tracked, since the reset has
+				 * already completed successfully.
+				 */
+ amdgpu_register_gpu_instance(tmp_adev);
+
+ if (!reset_context->hive &&
+ tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_add_device(tmp_adev);
+
+ r = amdgpu_device_ip_late_init(tmp_adev);
+ if (r)
+ goto out;
+
+ drm_client_dev_resume(adev_to_drm(tmp_adev), false);
+
+ /*
+ * The GPU enters bad state once faulty pages
+ * by ECC has reached the threshold, and ras
+ * recovery is scheduled next. So add one check
+ * here to break recovery if it indeed exceeds
+ * bad page threshold, and remind user to
+ * retire this GPU or setting one bigger
+ * bad_page_threshold value to fix this once
+ * probing driver again.
+ */
+ if (!amdgpu_ras_is_rma(tmp_adev)) {
+ /* must succeed. */
+ amdgpu_ras_resume(tmp_adev);
+ } else {
+ r = -EINVAL;
+ goto out;
+ }
+
+ /* Update PSP FW topology after reset */
+ if (reset_context->hive &&
+ tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ r = amdgpu_xgmi_update_topology(
+ reset_context->hive, tmp_adev);
}
}
- dma_fence_put(fence);
- fence = next;
- }
- mutex_unlock(&adev->shadow_list_lock);
+out:
+ if (!r) {
+ /* IP init is complete now, set level as default */
+ amdgpu_set_init_level(tmp_adev,
+ AMDGPU_INIT_LEVEL_DEFAULT);
+ amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+ r = amdgpu_ib_ring_tests(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
+ r = -EAGAIN;
+ goto end;
+ }
+ }
- if (fence) {
- r = dma_fence_wait(fence, false);
if (r)
- WARN(r, "recovery from shadow isn't completed\n");
+ tmp_adev->asic_reset_res = r;
}
- dma_fence_put(fence);
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->sched.thread)
- continue;
+end:
+ return r;
+}
+
+int amdgpu_do_asic_reset(struct list_head *device_list_handle,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ bool need_full_reset, skip_hw_reset;
+ int r = 0;
+
+ /* Try reset handler method first */
+ tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
+ reset_list);
+
+ reset_context->reset_device_list = device_list_handle;
+ r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
+ /* If reset handler not implemented, continue; otherwise return */
+ if (r == -EOPNOTSUPP)
+ r = 0;
+ else
+ return r;
+
+ /* Reset handler not implemented, use the default method */
+ need_full_reset =
+ test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+ skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
+
+ /*
+ * ASIC reset has to be done on all XGMI hive nodes ASAP
+	 * to allow proper link negotiation in FW (within 1 sec)
+ */
+ if (!skip_hw_reset && need_full_reset) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!queue_work(system_unbound_wq,
+ &tmp_adev->xgmi_reset_work))
+ r = -EALREADY;
+ } else
+ r = amdgpu_asic_reset(tmp_adev);
+
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "ASIC reset failed with error, %d for drm dev, %s",
+ r, adev_to_drm(tmp_adev)->unique);
+ goto out;
+ }
+ }
- amd_sched_job_recovery(&ring->sched);
- kthread_unpark(ring->sched.thread);
+		/* For XGMI wait for all resets to complete before proceeding */
+ if (!r) {
+ list_for_each_entry(tmp_adev, device_list_handle,
+ reset_list) {
+ if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ flush_work(&tmp_adev->xgmi_reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+ }
}
- drm_helper_resume_force_mode(adev->ddev);
- ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
- if (r) {
- /* bad news, how to tell it to userspace ? */
- dev_info(adev->dev, "GPU reset failed\n");
+ if (!r && amdgpu_ras_intr_triggered()) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ amdgpu_ras_reset_error_count(tmp_adev,
+ AMDGPU_RAS_BLOCK__MMHUB);
+ }
+
+ amdgpu_ras_intr_cleared();
}
- adev->gfx.in_reset = false;
- mutex_unlock(&adev->virt.lock_reset);
+ r = amdgpu_device_reinit_after_reset(reset_context);
+ if (r == -EAGAIN)
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+ else
+ clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+
+out:
return r;
}
-/**
- * amdgpu_gpu_reset - reset the asic
- *
- * @adev: amdgpu device pointer
- *
- * Attempt the reset the GPU if it has hung (all asics).
- * Returns 0 for success or an error on failure.
- */
-int amdgpu_gpu_reset(struct amdgpu_device *adev)
+static void amdgpu_device_set_mp1_state(struct amdgpu_device *adev)
{
- int i, r;
- int resched;
- bool need_full_reset;
- if (amdgpu_sriov_vf(adev))
- return amdgpu_sriov_gpu_reset(adev, true);
+ switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_MODE1:
+ case AMD_RESET_METHOD_LINK:
+ adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
+ break;
+ case AMD_RESET_METHOD_MODE2:
+ adev->mp1_state = PP_MP1_STATE_RESET;
+ break;
+ default:
+ adev->mp1_state = PP_MP1_STATE_NONE;
+ break;
+ }
+}
- if (!amdgpu_check_soft_reset(adev)) {
- DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
- return 0;
+static void amdgpu_device_unset_mp1_state(struct amdgpu_device *adev)
+{
+ amdgpu_vf_error_trans_all(adev);
+ adev->mp1_state = PP_MP1_STATE_NONE;
+}
+
+static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
+{
+ struct pci_dev *p = NULL;
+
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+ adev->pdev->bus->number, 1);
+ if (p) {
+ pm_runtime_enable(&(p->dev));
+ pm_runtime_resume(&(p->dev));
}
- atomic_inc(&adev->gpu_reset_counter);
+ pci_dev_put(p);
+}
- /* block TTM */
- resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
+{
+ enum amd_reset_method reset_method;
+ struct pci_dev *p = NULL;
+ u64 expires;
- /* block scheduler */
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
+	/*
+	 * For now, only BACO and mode1 reset are confirmed to
+	 * suffer the audio issue if the audio device is not
+	 * properly suspended first.
+	 */
+ reset_method = amdgpu_asic_reset_method(adev);
+ if ((reset_method != AMD_RESET_METHOD_BACO) &&
+ (reset_method != AMD_RESET_METHOD_MODE1))
+ return -EINVAL;
- if (!ring || !ring->sched.thread)
- continue;
- kthread_park(ring->sched.thread);
- amd_sched_hw_job_reset(&ring->sched);
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+ adev->pdev->bus->number, 1);
+ if (!p)
+ return -ENODEV;
+
+ expires = pm_runtime_autosuspend_expiration(&(p->dev));
+ if (!expires)
+ /*
+		 * If we cannot get the audio device's autosuspend delay,
+		 * use a fixed 4 s interval: 3 s is the audio controller's
+		 * default autosuspend delay, so 4 s is guaranteed to
+		 * cover it.
+ */
+ expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
+
+ while (!pm_runtime_status_suspended(&(p->dev))) {
+ if (!pm_runtime_suspend(&(p->dev)))
+ break;
+
+ if (expires < ktime_get_mono_fast_ns()) {
+ dev_warn(adev->dev, "failed to suspend display audio\n");
+ pci_dev_put(p);
+ /* TODO: abort the succeeding gpu reset? */
+ return -ETIMEDOUT;
+ }
}
- /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
- amdgpu_fence_driver_force_completion(adev);
- need_full_reset = amdgpu_need_full_reset(adev);
+ pm_runtime_disable(&(p->dev));
+
+ pci_dev_put(p);
+ return 0;
+}
+
+static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+#if defined(CONFIG_DEBUG_FS)
+ if (!amdgpu_sriov_vf(adev))
+ cancel_work(&adev->reset_work);
+#endif
+
+ if (adev->kfd.dev)
+ cancel_work(&adev->kfd.reset_work);
+
+ if (amdgpu_sriov_vf(adev))
+ cancel_work(&adev->virt.flr_work);
+
+ if (con && adev->ras_enabled)
+ cancel_work(&con->recovery_work);
+
+}
- if (!need_full_reset) {
- amdgpu_pre_soft_reset(adev);
- r = amdgpu_soft_reset(adev);
- amdgpu_post_soft_reset(adev);
- if (r || amdgpu_check_soft_reset(adev)) {
- DRM_INFO("soft reset failed, will fallback to full reset!\n");
- need_full_reset = true;
+static int amdgpu_device_health_check(struct list_head *device_list_handle)
+{
+ struct amdgpu_device *tmp_adev;
+ int ret = 0;
+
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ ret |= amdgpu_device_bus_status_check(tmp_adev);
+ }
+
+ return ret;
+}
+
+static void amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ /*
+ * Build list of devices to reset.
+	 * In case we are in XGMI hive mode, re-sort the device list
+ * to put adev in the 1st position.
+ */
+ if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ list_add_tail(&tmp_adev->reset_list, device_list);
+ if (adev->shutdown)
+ tmp_adev->shutdown = true;
+ if (amdgpu_reset_in_dpc(adev))
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
}
+ if (!list_is_first(&adev->reset_list, device_list))
+ list_rotate_to_front(&adev->reset_list, device_list);
+ } else {
+ list_add_tail(&adev->reset_list, device_list);
}
+}
+
+static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ if (list_empty(device_list))
+ return;
+ tmp_adev =
+ list_first_entry(device_list, struct amdgpu_device, reset_list);
+ amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
+}
+
+static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
+ struct list_head *device_list)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ if (list_empty(device_list))
+ return;
+ tmp_adev =
+ list_first_entry(device_list, struct amdgpu_device, reset_list);
+ amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+}
+
+static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context,
+ struct list_head *device_list,
+ struct amdgpu_hive_info *hive,
+ bool need_emergency_restart)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int i;
- if (need_full_reset) {
- r = amdgpu_suspend(adev);
+ /* block all schedulers and reset given job's ring */
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ amdgpu_device_set_mp1_state(tmp_adev);
+
+		/*
+		 * Try to put the audio codec into the suspend state
+		 * before the GPU reset starts.
+		 *
+		 * The power domain of the graphics device is shared
+		 * with the AZ (audio) power domain. Without this we
+		 * may change the audio hardware behind the audio
+		 * driver's back, which triggers audio codec errors.
+		 */
+ if (!amdgpu_device_suspend_display_audio(tmp_adev))
+ tmp_adev->pcie_reset_ctx.audio_suspended = true;
+
+ amdgpu_ras_set_error_query_ready(tmp_adev, false);
+
+ cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
+
+ amdgpu_amdkfd_pre_reset(tmp_adev, reset_context);
+
+		/*
+		 * Mark these ASICs as untracked first, and add them
+		 * back after the reset completes.
+		 */
+ amdgpu_unregister_gpu_instance(tmp_adev);
+
+ drm_client_dev_suspend(adev_to_drm(tmp_adev), false);
+
+ /* disable ras on ALL IPs */
+ if (!need_emergency_restart && !amdgpu_reset_in_dpc(adev) &&
+ amdgpu_device_ip_need_full_reset(tmp_adev))
+ amdgpu_ras_suspend(tmp_adev);
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = tmp_adev->rings[i];
+
+ if (!amdgpu_ring_sched_ready(ring))
+ continue;
+
+ drm_sched_stop(&ring->sched, job ? &job->base : NULL);
-retry:
- /* Disable fb access */
- if (adev->mode_info.num_crtc) {
- struct amdgpu_mode_mc_save save;
- amdgpu_display_stop_mc_access(adev, &save);
- amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+ if (need_emergency_restart)
+ amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
}
- if (adev->is_atom_fw)
- amdgpu_atomfirmware_scratch_regs_save(adev);
- else
- amdgpu_atombios_scratch_regs_save(adev);
- r = amdgpu_asic_reset(adev);
- if (adev->is_atom_fw)
- amdgpu_atomfirmware_scratch_regs_restore(adev);
- else
- amdgpu_atombios_scratch_regs_restore(adev);
- /* post card */
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ atomic_inc(&tmp_adev->gpu_reset_counter);
+ }
+}
- if (!r) {
- dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
- r = amdgpu_resume(adev);
+static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int retry_limit = AMDGPU_MAX_RETRY_LIMIT;
+ int r = 0;
+
+retry: /* Rest of adevs pre asic reset from XGMI hive. */
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
+ /*TODO Should we stop ?*/
+ if (r) {
+ dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
+ r, adev_to_drm(tmp_adev)->unique);
+ tmp_adev->asic_reset_res = r;
}
}
- if (!r) {
- amdgpu_irq_gpu_reset_resume_helper(adev);
- if (need_full_reset && amdgpu_need_backup(adev)) {
- r = amdgpu_ttm_recover_gart(adev);
- if (r)
- DRM_ERROR("gart recovery failed!!!\n");
+
+ /* Actual ASIC resets if needed.*/
+ /* Host driver will handle XGMI hive reset for SRIOV */
+ if (amdgpu_sriov_vf(adev)) {
+
+ /* Bail out of reset early */
+ if (amdgpu_ras_is_rma(adev))
+ return -ENODEV;
+
+ if (amdgpu_ras_get_fed_status(adev) || amdgpu_virt_rcvd_ras_interrupt(adev)) {
+ dev_dbg(adev->dev, "Detected RAS error, wait for FLR completion\n");
+ amdgpu_ras_set_fed(adev, true);
+ set_bit(AMDGPU_HOST_FLR, &reset_context->flags);
}
- r = amdgpu_ib_ring_tests(adev);
- if (r) {
- dev_err(adev->dev, "ib ring test failed (%d).\n", r);
- r = amdgpu_suspend(adev);
- need_full_reset = true;
+
+ r = amdgpu_device_reset_sriov(adev, reset_context);
+ if (AMDGPU_RETRY_SRIOV_RESET(r) && (retry_limit--) > 0) {
+ amdgpu_virt_release_full_gpu(adev, true);
goto retry;
}
- /**
- * recovery vm page tables, since we cannot depend on VRAM is
- * consistent after gpu full reset.
+ if (r)
+ adev->asic_reset_res = r;
+ } else {
+ r = amdgpu_do_asic_reset(device_list, reset_context);
+ if (r && r == -EAGAIN)
+ goto retry;
+ }
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ /*
+		 * Drop any pending non-scheduler resets queued before reset is done.
+ * Any reset scheduled after this point would be valid. Scheduler resets
+ * were already dropped during drm_sched_stop and no new ones can come
+ * in before drm_sched_start.
*/
- if (need_full_reset && amdgpu_need_backup(adev)) {
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- struct amdgpu_bo *bo, *tmp;
- struct dma_fence *fence = NULL, *next = NULL;
-
- DRM_INFO("recover vram bo from shadow\n");
- mutex_lock(&adev->shadow_list_lock);
- list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
- next = NULL;
- amdgpu_recover_vram_from_shadow(adev, ring, bo, &next);
- if (fence) {
- r = dma_fence_wait(fence, false);
- if (r) {
- WARN(r, "recovery from shadow isn't completed\n");
- break;
- }
- }
+ amdgpu_device_stop_pending_resets(tmp_adev);
+ }
+
+ return r;
+}
+
+static int amdgpu_device_sched_resume(struct list_head *device_list,
+ struct amdgpu_reset_context *reset_context,
+ bool job_signaled)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+ int i, r = 0;
+
+ /* Post ASIC reset for all devs .*/
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
- dma_fence_put(fence);
- fence = next;
- }
- mutex_unlock(&adev->shadow_list_lock);
- if (fence) {
- r = dma_fence_wait(fence, false);
- if (r)
- WARN(r, "recovery from shadow isn't completed\n");
- }
- dma_fence_put(fence);
- }
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- struct amdgpu_ring *ring = adev->rings[i];
+ struct amdgpu_ring *ring = tmp_adev->rings[i];
- if (!ring || !ring->sched.thread)
+ if (!amdgpu_ring_sched_ready(ring))
continue;
- amd_sched_job_recovery(&ring->sched);
- kthread_unpark(ring->sched.thread);
+ drm_sched_start(&ring->sched, 0);
}
- } else {
- dev_err(adev->dev, "asic resume failed (%d).\n", r);
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
- if (adev->rings[i] && adev->rings[i]->sched.thread) {
- kthread_unpark(adev->rings[i]->sched.thread);
- }
+
+ if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
+ drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
+
+ if (tmp_adev->asic_reset_res) {
+			/* Bad news: how do we tell userspace? For a RAS error we
+			 * should report GPU bad status instead of a reset failure.
+			 */
+ if (reset_context->src != AMDGPU_RESET_SRC_RAS ||
+ !amdgpu_ras_eeprom_check_err_threshold(tmp_adev))
+ dev_info(
+ tmp_adev->dev,
+ "GPU reset(%d) failed with error %d \n",
+ atomic_read(
+ &tmp_adev->gpu_reset_counter),
+ tmp_adev->asic_reset_res);
+ amdgpu_vf_error_put(tmp_adev,
+ AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0,
+ tmp_adev->asic_reset_res);
+ if (!r)
+ r = tmp_adev->asic_reset_res;
+ tmp_adev->asic_reset_res = 0;
+ } else {
+ dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n",
+ atomic_read(&tmp_adev->gpu_reset_counter));
+ if (amdgpu_acpi_smart_shift_update(tmp_adev,
+ AMDGPU_SS_DEV_D0))
+ dev_warn(tmp_adev->dev,
+ "smart shift update failed\n");
}
}
- drm_helper_resume_force_mode(adev->ddev);
+ return r;
+}
- ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
- if (r) {
- /* bad news, how to tell it to userspace ? */
- dev_info(adev->dev, "GPU reset failed\n");
+static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
+ struct list_head *device_list,
+ bool need_emergency_restart)
+{
+ struct amdgpu_device *tmp_adev = NULL;
+
+ list_for_each_entry(tmp_adev, device_list, reset_list) {
+ /* unlock kfd: SRIOV would do it separately */
+ if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_post_reset(tmp_adev);
+
+		/* kfd_post_reset does nothing if the kfd device is not initialized,
+		 * so bring up kfd here if it was not initialized before.
+		 */
+ if (!adev->kfd.init_complete)
+ amdgpu_amdkfd_device_init(adev);
+
+ if (tmp_adev->pcie_reset_ctx.audio_suspended)
+ amdgpu_device_resume_display_audio(tmp_adev);
+
+ amdgpu_device_unset_mp1_state(tmp_adev);
+
+ amdgpu_ras_set_error_query_ready(tmp_adev, true);
+
+ }
+}
+
+
+/**
+ * amdgpu_device_gpu_recover - reset the asic and recover scheduler
+ *
+ * @adev: amdgpu_device pointer
+ * @job: the job that triggered the hang
+ * @reset_context: amdgpu reset context pointer
+ *
+ * Attempt to reset the GPU if it has hung (all asics).
+ * Attempt a soft reset or a full reset and reinitialize the ASIC.
+ * Returns 0 for success or an error on failure.
+ */
+
+int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head device_list;
+ bool job_signaled = false;
+ struct amdgpu_hive_info *hive = NULL;
+ int r = 0;
+ bool need_emergency_restart = false;
+
+ /*
+ * If it reaches here because of hang/timeout and a RAS error is
+ * detected at the same time, let RAS recovery take care of it.
+ */
+ if (amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY) &&
+ !amdgpu_sriov_vf(adev) &&
+ reset_context->src != AMDGPU_RESET_SRC_RAS) {
+ dev_dbg(adev->dev,
+ "Gpu recovery from source: %d yielding to RAS error recovery handling",
+ reset_context->src);
+ return 0;
+ }
+
+ /*
+ * Special case: RAS triggered and full reset isn't supported
+ */
+ need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);
+
+ /*
+	 * Flush RAM to disk so that after the reboot
+	 * the user can read the log and see why the system rebooted.
+ */
+ if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
+ amdgpu_ras_get_context(adev)->reboot) {
+ dev_warn(adev->dev, "Emergency reboot.");
+
+ ksys_sync_helper();
+ emergency_restart();
+ }
+
+ dev_info(adev->dev, "GPU %s begin!. Source: %d\n",
+ need_emergency_restart ? "jobs stop" : "reset",
+ reset_context->src);
+
+ if (!amdgpu_sriov_vf(adev))
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+
+ reset_context->job = job;
+ reset_context->hive = hive;
+ INIT_LIST_HEAD(&device_list);
+
+ amdgpu_device_recovery_prepare(adev, &device_list, hive);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ r = amdgpu_device_health_check(&device_list);
+ if (r)
+ goto end_reset;
+ }
+
+ /* We need to lock reset domain only once both for XGMI and single device */
+ amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+
+ amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
+ hive, need_emergency_restart);
+ if (need_emergency_restart)
+ goto skip_sched_resume;
+ /*
+ * Must check guilty signal here since after this point all old
+ * HW fences are force signaled.
+ *
+ * job->base holds a reference to parent fence
+ */
+ if (job && dma_fence_is_signaled(&job->hw_fence.base)) {
+ job_signaled = true;
+ dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
+ goto skip_hw_reset;
+ }
+
+ r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
+ if (r)
+ goto reset_unlock;
+skip_hw_reset:
+ r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
+ if (r)
+ goto reset_unlock;
+skip_sched_resume:
+ amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
+reset_unlock:
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
+end_reset:
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+ }
+
+ if (r)
+ dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
+
+ atomic_set(&adev->reset_domain->reset_res, r);
+
+ if (!r) {
+ struct amdgpu_task_info *ti = NULL;
+
+ if (job)
+ ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
+
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
+ ti ? &ti->task : NULL);
+
+ amdgpu_vm_put_task_info(ti);
}
return r;
}
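
A typical caller of amdgpu_device_gpu_recover() is a job timeout handler. The sketch below shows how the reset context might be filled before the call; only the fields and helpers it uses (should_recover_gpu, reset_req_dev, job, AMDGPU_NEED_FULL_RESET, flags) appear in this diff, while the surrounding function is purely illustrative:

	/* Sketch of a hypothetical caller; not part of the patch. */
	static void example_trigger_recovery(struct amdgpu_device *adev,
					     struct amdgpu_job *job)
	{
		struct amdgpu_reset_context reset_context;

		if (!amdgpu_device_should_recover_gpu(adev))
			return;

		memset(&reset_context, 0, sizeof(reset_context));
		reset_context.reset_req_dev = adev;
		reset_context.job = job;
		set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, job, &reset_context);
	}
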
-void amdgpu_get_pcie_info(struct amdgpu_device *adev)
+/**
+ * amdgpu_device_partner_bandwidth - find the bandwidth of appropriate partner
+ *
+ * @adev: amdgpu_device pointer
+ * @speed: pointer to the speed of the link
+ * @width: pointer to the width of the link
+ *
+ * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
+ * first physical partner to an AMD dGPU.
+ * This will exclude any virtual switches and links.
+ */
+static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
{
- u32 mask;
- int ret;
+ struct pci_dev *parent = adev->pdev;
+
+ if (!speed || !width)
+ return;
+
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
+ while ((parent = pci_upstream_bridge(parent))) {
+ /* skip upstream/downstream switches internal to dGPU*/
+ if (parent->vendor == PCI_VENDOR_ID_ATI)
+ continue;
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ break;
+ }
+ } else {
+ /* use the current speeds rather than max if switching is not supported */
+ pcie_bandwidth_available(adev->pdev, NULL, speed, width);
+ }
+}
+
+/**
+ * amdgpu_device_gpu_bandwidth - find the bandwidth of the GPU
+ *
+ * @adev: amdgpu_device pointer
+ * @speed: pointer to the speed of the link
+ * @width: pointer to the width of the link
+ *
+ * Evaluate the hierarchy to find the speed and bandwidth capabilities of the
+ * AMD dGPU which may be a virtual upstream bridge.
+ */
+static void amdgpu_device_gpu_bandwidth(struct amdgpu_device *adev,
+ enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ struct pci_dev *parent = adev->pdev;
+
+ if (!speed || !width)
+ return;
+
+ parent = pci_upstream_bridge(parent);
+ if (parent && parent->vendor == PCI_VENDOR_ID_ATI) {
+ /* use the upstream/downstream switches internal to dGPU */
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ while ((parent = pci_upstream_bridge(parent))) {
+ if (parent->vendor == PCI_VENDOR_ID_ATI) {
+ /* use the upstream/downstream switches internal to dGPU */
+ *speed = pcie_get_speed_cap(parent);
+ *width = pcie_get_width_cap(parent);
+ }
+ }
+ } else {
+ /* use the device itself */
+ *speed = pcie_get_speed_cap(adev->pdev);
+ *width = pcie_get_width_cap(adev->pdev);
+ }
+}
+
+/**
+ * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Fetches and stores in the driver the PCIE capabilities (gen speed
+ * and lanes) of the slot the device is in. Handles APUs and
+ * virtualized environments where PCIE config space may not be available.
+ */
+static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
+{
+ enum pci_bus_speed speed_cap, platform_speed_cap;
+ enum pcie_link_width platform_link_width, link_width;
if (amdgpu_pcie_gen_cap)
adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
@@ -2732,7 +6675,7 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev)
adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
/* covers APUs as well */
- if (pci_is_root_bus(adev->pdev->bus)) {
+ if (pci_is_root_bus(adev->pdev->bus) && !amdgpu_passthrough(adev)) {
if (adev->pm.pcie_gen_mask == 0)
adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
if (adev->pm.pcie_mlw_mask == 0)
@@ -2740,773 +6683,1001 @@ void amdgpu_get_pcie_info(struct amdgpu_device *adev)
return;
}
+ if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
+ return;
+
+ amdgpu_device_partner_bandwidth(adev, &platform_speed_cap,
+ &platform_link_width);
+ amdgpu_device_gpu_bandwidth(adev, &speed_cap, &link_width);
+
if (adev->pm.pcie_gen_mask == 0) {
- ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
- if (!ret) {
- adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ /* asic caps */
+ if (speed_cap == PCI_SPEED_UNKNOWN) {
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
-
- if (mask & DRM_PCIE_SPEED_25)
- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
- if (mask & DRM_PCIE_SPEED_50)
- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
- if (mask & DRM_PCIE_SPEED_80)
- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
} else {
- adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+ if (speed_cap == PCIE_SPEED_32_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
+ else if (speed_cap == PCIE_SPEED_16_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
+ else if (speed_cap == PCIE_SPEED_8_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
+ else if (speed_cap == PCIE_SPEED_5_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
+ else
+ adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
+ }
+ /* platform caps */
+ if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
+ } else {
+ if (platform_speed_cap == PCIE_SPEED_32_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
+ else if (platform_speed_cap == PCIE_SPEED_16_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
+ else if (platform_speed_cap == PCIE_SPEED_8_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
+ else if (platform_speed_cap == PCIE_SPEED_5_0GT)
+ adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
+ else
+ adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
+
}
}
if (adev->pm.pcie_mlw_mask == 0) {
- ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
- if (!ret) {
- switch (mask) {
- case 32:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ /* asic caps */
+ if (link_width == PCIE_LNK_WIDTH_UNKNOWN) {
+ adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_ASIC_PCIE_MLW_MASK;
+ } else {
+ switch (link_width) {
+ case PCIE_LNK_X32:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 16:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ case PCIE_LNK_X16:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 12:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ case PCIE_LNK_X12:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 8:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ case PCIE_LNK_X8:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 4:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ case PCIE_LNK_X4:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 2:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ case PCIE_LNK_X2:
+ adev->pm.pcie_mlw_mask |= (CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1);
break;
- case 1:
- adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
+ case PCIE_LNK_X1:
+ adev->pm.pcie_mlw_mask |= CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X1;
break;
default:
break;
}
+ }
+ /* platform caps */
+ if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
+ adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
} else {
- adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
+ switch (platform_link_width) {
+ case PCIE_LNK_X32:
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X16:
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X12:
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X8:
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X4:
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X2:
+ adev->pm.pcie_mlw_mask |= (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case PCIE_LNK_X1:
+ adev->pm.pcie_mlw_mask |= CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
+ break;
+ default:
+ break;
+ }
}
}
}
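
The gen and width masks built above are bit-ORed sets of CAIL_* capability flags, so the highest supported value can be read back with a simple priority check. The helper below is illustrative only and not part of the patch; it uses the same CAIL_PCIE_LINK_SPEED_SUPPORT_GENn defines as the function above:

	/* Illustrative only: map a pcie_gen_mask to the highest supported gen. */
	static int example_highest_pcie_gen(u32 pcie_gen_mask)
	{
		if (pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5)
			return 5;
		if (pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
			return 4;
		if (pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
			return 3;
		if (pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
			return 2;
		if (pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
			return 1;
		return 0;
	}
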
-/*
- * Debugfs
+/**
+ * amdgpu_device_is_peer_accessible - Check peer access through PCIe BAR
+ *
+ * @adev: amdgpu_device pointer
+ * @peer_adev: amdgpu_device pointer for peer device trying to access @adev
+ *
+ * Return true if @peer_adev can access (DMA) @adev through the PCIe
+ * BAR, i.e. @adev is "large BAR" and the BAR matches the DMA mask of
+ * @peer_adev.
*/
-int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- const struct drm_info_list *files,
- unsigned nfiles)
+bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev)
{
- unsigned i;
-
- for (i = 0; i < adev->debugfs_count; i++) {
- if (adev->debugfs[i].files == files) {
- /* Already registered */
- return 0;
- }
- }
-
- i = adev->debugfs_count + 1;
- if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
- DRM_ERROR("Reached maximum number of debugfs components.\n");
- DRM_ERROR("Report so we increase "
- "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
- return -EINVAL;
- }
- adev->debugfs[adev->debugfs_count].files = files;
- adev->debugfs[adev->debugfs_count].num_files = nfiles;
- adev->debugfs_count = i;
-#if defined(CONFIG_DEBUG_FS)
- drm_debugfs_create_files(files, nfiles,
- adev->ddev->primary->debugfs_root,
- adev->ddev->primary);
+#ifdef CONFIG_HSA_AMD_P2P
+ bool p2p_access =
+ !adev->gmc.xgmi.connected_to_cpu &&
+ !(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
+ if (!p2p_access)
+ dev_info(adev->dev, "PCIe P2P access from peer device %s is not supported by the chipset\n",
+ pci_name(peer_adev->pdev));
+
+ bool is_large_bar = adev->gmc.visible_vram_size &&
+ adev->gmc.real_vram_size == adev->gmc.visible_vram_size;
+ bool p2p_addressable = amdgpu_device_check_iommu_remap(peer_adev);
+
+ if (!p2p_addressable) {
+ uint64_t address_mask = peer_adev->dev->dma_mask ?
+ ~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
+ resource_size_t aper_limit =
+ adev->gmc.aper_base + adev->gmc.aper_size - 1;
+
+ p2p_addressable = !(adev->gmc.aper_base & address_mask ||
+ aper_limit & address_mask);
+ }
+ return pcie_p2p && is_large_bar && p2p_access && p2p_addressable;
+#else
+ return false;
#endif
- return 0;
}
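
The addressability check in amdgpu_device_is_peer_accessible() asks whether the whole VRAM aperture sits below the peer's DMA mask. A worked example with made-up aperture values, purely for illustration and not part of the patch:

	/* Worked example with made-up values; not part of the patch. */
	static bool example_aperture_fits_dma_mask(void)
	{
		u64 dma_mask     = (1ULL << 44) - 1;	/* peer supports 44-bit DMA */
		u64 address_mask = ~dma_mask;		/* bits the peer cannot reach */
		u64 aper_base    = 0x7c00000000ULL;	/* hypothetical BAR base */
		u64 aper_size    = 0x400000000ULL;	/* hypothetical 16 GiB BAR */
		u64 aper_limit   = aper_base + aper_size - 1;

		/* Same test as amdgpu_device_is_peer_accessible() above. */
		return !((aper_base & address_mask) || (aper_limit & address_mask));
	}
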
-#if defined(CONFIG_DEBUG_FS)
-
-static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
+int amdgpu_device_baco_enter(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- bool pm_pg_lock, use_bank;
- unsigned instance_bank, sh_bank, se_bank;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
+ if (!amdgpu_device_supports_baco(adev))
+ return -ENOTSUPP;
- /* are we reading registers for which a PG lock is necessary? */
- pm_pg_lock = (*pos >> 23) & 1;
-
- if (*pos & (1ULL << 62)) {
- se_bank = (*pos >> 24) & 0x3FF;
- sh_bank = (*pos >> 34) & 0x3FF;
- instance_bank = (*pos >> 44) & 0x3FF;
-
- if (se_bank == 0x3FF)
- se_bank = 0xFFFFFFFF;
- if (sh_bank == 0x3FF)
- sh_bank = 0xFFFFFFFF;
- if (instance_bank == 0x3FF)
- instance_bank = 0xFFFFFFFF;
- use_bank = 1;
- } else {
- use_bank = 0;
- }
+ if (ras && adev->ras_enabled &&
+ adev->nbio.funcs->enable_doorbell_interrupt)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, false);
- *pos &= (1UL << 22) - 1;
-
- if (use_bank) {
- if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
- (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
- return -EINVAL;
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se_bank,
- sh_bank, instance_bank);
- }
+ return amdgpu_dpm_baco_enter(adev);
+}
- if (pm_pg_lock)
- mutex_lock(&adev->pm.mutex);
+int amdgpu_device_baco_exit(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int ret = 0;
- while (size) {
- uint32_t value;
+ if (!amdgpu_device_supports_baco(adev))
+ return -ENOTSUPP;
- if (*pos > adev->rmmio_size)
- goto end;
+ ret = amdgpu_dpm_baco_exit(adev);
+ if (ret)
+ return ret;
- value = RREG32(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
- if (r) {
- result = r;
- goto end;
- }
+ if (ras && adev->ras_enabled &&
+ adev->nbio.funcs->enable_doorbell_interrupt)
+ adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
+ if (amdgpu_passthrough(adev) && adev->nbio.funcs &&
+ adev->nbio.funcs->clear_doorbell_interrupt)
+ adev->nbio.funcs->clear_doorbell_interrupt(adev);
-end:
- if (use_bank) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- }
+ return 0;
+}
- if (pm_pg_lock)
- mutex_unlock(&adev->pm.mutex);
+/**
+ * amdgpu_pci_error_detected - Called when a PCI error is detected.
+ * @pdev: PCI device struct
+ * @state: PCI channel state
+ *
+ * Description: Called when a PCI error is detected.
+ *
+ * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
+ */
+pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
+ amdgpu_get_xgmi_hive(adev);
+ struct amdgpu_reset_context reset_context;
+ struct list_head device_list;
+
+ dev_info(adev->dev, "PCI error: detected callback!!\n");
+
+ adev->pci_channel_state = state;
+
+ switch (state) {
+ case pci_channel_io_normal:
+ dev_info(adev->dev, "pci_channel_io_normal: state(%d)!!\n", state);
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ /* Fatal error, prepare for slot reset */
+ dev_info(adev->dev, "pci_channel_io_frozen: state(%d)!!\n", state);
+ if (hive) {
+ /* Hive devices should be able to support FW based
+		 * link reset on other devices; if not, return.
+ */
+ if (!amdgpu_dpm_is_link_reset_supported(adev)) {
+ dev_warn(adev->dev,
+ "No support for XGMI hive yet...\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+		/* Set dpc status only if the device is part of a hive.
+ * Non-hive devices should be able to recover after
+ * link reset.
+ */
+ amdgpu_reset_set_dpc_status(adev, true);
- return result;
+ mutex_lock(&hive->hive_lock);
+ }
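+	/* Quiesce GPU activity and hold the reset locks; they are released by
+	 * the slot reset / resume callbacks once recovery completes.
+	 */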
+ memset(&reset_context, 0, sizeof(reset_context));
+ INIT_LIST_HEAD(&device_list);
+
+ amdgpu_device_recovery_prepare(adev, &device_list, hive);
+ amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+ amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
+ hive, false);
+ if (hive)
+ mutex_unlock(&hive->hive_lock);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ /* Permanent error, prepare for device removal */
+ dev_info(adev->dev, "pci_channel_io_perm_failure: state(%d)!!\n", state);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
}
-static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
+/**
+ * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
+ * @pdev: pointer to PCI device
+ */
+pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- bool pm_pg_lock, use_bank;
- unsigned instance_bank, sh_bank, se_bank;
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
+ dev_info(adev->dev, "PCI error: mmio enabled callback!!\n");
+
+ /* TODO - dump whatever for debugging purposes */
+
+	/* This is called only if amdgpu_pci_error_detected returns
+ * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
+ * works, no need to reset slot.
+ */
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
- /* are we reading registers for which a PG lock is necessary? */
- pm_pg_lock = (*pos >> 23) & 1;
-
- if (*pos & (1ULL << 62)) {
- se_bank = (*pos >> 24) & 0x3FF;
- sh_bank = (*pos >> 34) & 0x3FF;
- instance_bank = (*pos >> 44) & 0x3FF;
-
- if (se_bank == 0x3FF)
- se_bank = 0xFFFFFFFF;
- if (sh_bank == 0x3FF)
- sh_bank = 0xFFFFFFFF;
- if (instance_bank == 0x3FF)
- instance_bank = 0xFFFFFFFF;
- use_bank = 1;
+/**
+ * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called by the pci error recovery
+ * code after the PCI slot has been reset, just before we
+ * should resume normal operations.
+ */
+pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_reset_context reset_context;
+ struct amdgpu_device *tmp_adev;
+ struct amdgpu_hive_info *hive;
+ struct list_head device_list;
+ struct pci_dev *link_dev;
+ int r = 0, i, timeout;
+ u32 memsize;
+ u16 status;
+
+ dev_info(adev->dev, "PCI error: slot reset callback!!\n");
+
+ memset(&reset_context, 0, sizeof(reset_context));
+
+ if (adev->pcie_reset_ctx.swus)
+ link_dev = adev->pcie_reset_ctx.swus;
+ else
+ link_dev = adev->pdev;
+ /* wait for asic to come out of reset, timeout = 10s */
+ timeout = 10000;
+ do {
+ usleep_range(10000, 10500);
+ r = pci_read_config_word(link_dev, PCI_VENDOR_ID, &status);
+ timeout -= 10;
+ } while (timeout > 0 && (status != PCI_VENDOR_ID_ATI) &&
+ (status != PCI_VENDOR_ID_AMD));
+
+ if ((status != PCI_VENDOR_ID_ATI) && (status != PCI_VENDOR_ID_AMD)) {
+ r = -ETIME;
+ goto out;
+ }
+
+ amdgpu_device_load_switch_state(adev);
+ /* Restore PCI confspace */
+ amdgpu_device_load_pci_state(pdev);
+
+ /* confirm ASIC came out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ memsize = amdgpu_asic_get_config_memsize(adev);
+
+ if (memsize != 0xffffffff)
+ break;
+ udelay(1);
+ }
+ if (memsize == 0xffffffff) {
+ r = -ETIME;
+ goto out;
+ }
+
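+	/* Request a full ASIC re-init after the slot reset and skip the
+	 * devcoredump for this PCI recovery path.
+	 */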
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+ INIT_LIST_HEAD(&device_list);
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ mutex_lock(&hive->hive_lock);
+ reset_context.hive = hive;
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ tmp_adev->pcie_reset_ctx.in_link_reset = true;
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ }
} else {
- use_bank = 0;
+ set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
+ list_add_tail(&adev->reset_list, &device_list);
}
- *pos &= (1UL << 22) - 1;
-
- if (use_bank) {
- if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
- (se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines))
- return -EINVAL;
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se_bank,
- sh_bank, instance_bank);
+ r = amdgpu_device_asic_reset(adev, &device_list, &reset_context);
+out:
+ if (!r) {
+ if (amdgpu_device_cache_pci_state(adev->pdev))
+ pci_restore_state(adev->pdev);
+ dev_info(adev->dev, "PCIe error recovery succeeded\n");
+ } else {
+ dev_err(adev->dev, "PCIe error recovery failed, err:%d\n", r);
+ if (hive) {
+ list_for_each_entry(tmp_adev, &device_list, reset_list)
+ amdgpu_device_unset_mp1_state(tmp_adev);
+ }
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
}
- if (pm_pg_lock)
- mutex_lock(&adev->pm.mutex);
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+ }
- while (size) {
- uint32_t value;
+ return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
- if (*pos > adev->rmmio_size)
- return result;
+/**
+ * amdgpu_pci_resume() - resume normal ops after PCI reset
+ * @pdev: pointer to PCI device
+ *
+ * Called when the error recovery driver tells us that its
+ * OK to resume normal operation.
+ */
+void amdgpu_pci_resume(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct list_head device_list;
+ struct amdgpu_hive_info *hive = NULL;
+ struct amdgpu_device *tmp_adev = NULL;
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
+ dev_info(adev->dev, "PCI error: resume callback!!\n");
- WREG32(*pos >> 2, value);
+ /* Only continue execution for the case of pci_channel_io_frozen */
+ if (adev->pci_channel_state != pci_channel_io_frozen)
+ return;
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
+ INIT_LIST_HEAD(&device_list);
- if (use_bank) {
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- }
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ mutex_lock(&hive->hive_lock);
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ tmp_adev->pcie_reset_ctx.in_link_reset = false;
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ }
+ } else
+ list_add_tail(&adev->reset_list, &device_list);
- if (pm_pg_lock)
- mutex_unlock(&adev->pm.mutex);
+ amdgpu_device_sched_resume(&device_list, NULL, NULL);
+ amdgpu_device_gpu_resume(adev, &device_list, false);
+ amdgpu_device_recovery_put_reset_lock(adev, &device_list);
- return result;
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+ }
}
-static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
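+/* Cache the config space of the AMD/ATI switch bridges (SWUS/SWDS) above the
+ * GPU so that amdgpu_device_load_switch_state() can restore them after a
+ * fatal link reset.
+ */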
+static void amdgpu_device_cache_switch_state(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
+ struct pci_dev *swus, *swds;
int r;
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
+ swds = pci_upstream_bridge(adev->pdev);
+ if (!swds || swds->vendor != PCI_VENDOR_ID_ATI ||
+ pci_pcie_type(swds) != PCI_EXP_TYPE_DOWNSTREAM)
+ return;
+ swus = pci_upstream_bridge(swds);
+ if (!swus ||
+ (swus->vendor != PCI_VENDOR_ID_ATI &&
+ swus->vendor != PCI_VENDOR_ID_AMD) ||
+ pci_pcie_type(swus) != PCI_EXP_TYPE_UPSTREAM)
+ return;
- value = RREG32_PCIE(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
+ /* If already saved, return */
+ if (adev->pcie_reset_ctx.swus)
+ return;
+ /* Upstream bridge is ATI, assume it's SWUS/DS architecture */
+ r = pci_save_state(swds);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swds_pcistate = pci_store_saved_state(swds);
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
+ r = pci_save_state(swus);
+ if (r)
+ return;
+ adev->pcie_reset_ctx.swus_pcistate = pci_store_saved_state(swus);
- return result;
+ adev->pcie_reset_ctx.swus = swus;
}
-static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
+static void amdgpu_device_load_switch_state(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
+ struct pci_dev *pdev;
int r;
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
-
- WREG32_PCIE(*pos >> 2, value);
+ if (!adev->pcie_reset_ctx.swds_pcistate ||
+ !adev->pcie_reset_ctx.swus_pcistate)
+ return;
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
+ pdev = adev->pcie_reset_ctx.swus;
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swus_pcistate);
+ if (!r) {
+ pci_restore_state(pdev);
+ } else {
+ dev_warn(adev->dev, "Failed to load SWUS state, err:%d\n", r);
+ return;
}
- return result;
+ pdev = pci_upstream_bridge(adev->pdev);
+ r = pci_load_saved_state(pdev, adev->pcie_reset_ctx.swds_pcistate);
+ if (!r)
+ pci_restore_state(pdev);
+ else
+ dev_warn(adev->dev, "Failed to load SWDS state, err:%d\n", r);
}
-static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
+bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
int r;
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
+ if (amdgpu_sriov_vf(adev))
+ return false;
- while (size) {
- uint32_t value;
+ r = pci_save_state(pdev);
+ if (!r) {
+ kfree(adev->pci_state);
- value = RREG32_DIDT(*pos >> 2);
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
+ adev->pci_state = pci_store_saved_state(pdev);
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
+ if (!adev->pci_state) {
+ dev_err(adev->dev, "Failed to store PCI saved state");
+ return false;
+ }
+ } else {
+ dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r);
+ return false;
}
- return result;
+ amdgpu_device_cache_switch_state(adev);
+
+ return true;
}
-static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
+bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
int r;
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
-
- while (size) {
- uint32_t value;
-
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
+ if (!adev->pci_state)
+ return false;
- WREG32_DIDT(*pos >> 2, value);
+ r = pci_load_saved_state(pdev, adev->pci_state);
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
+ if (!r) {
+ pci_restore_state(pdev);
+ } else {
+ dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r);
+ return false;
}
- return result;
+ return true;
}
-static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
+void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
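+	/* HDP flush is skipped for bare-metal APUs and for GPUs whose VRAM is
+	 * CPU coherent (e.g. connected to the CPU via XGMI).
+	 */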
+#ifdef CONFIG_X86_64
+ if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
+ return;
+#endif
+ if (adev->gmc.xgmi.connected_to_cpu)
+ return;
- while (size) {
- uint32_t value;
+ if (ring && ring->funcs->emit_hdp_flush)
+ amdgpu_ring_emit_hdp_flush(ring);
+ else
+ amdgpu_asic_flush_hdp(adev, ring);
+}
- value = RREG32_SMC(*pos);
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
+void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+#ifdef CONFIG_X86_64
+ if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev))
+ return;
+#endif
+ if (adev->gmc.xgmi.connected_to_cpu)
+ return;
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
+ amdgpu_asic_invalidate_hdp(adev, ring);
+}
- return result;
+int amdgpu_in_reset(struct amdgpu_device *adev)
+{
+ return atomic_read(&adev->reset_domain->in_gpu_reset);
}
-static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf,
- size_t size, loff_t *pos)
+/**
+ * amdgpu_device_halt() - bring hardware to some kind of halt state
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Bring hardware to some kind of halt state so that no one can touch it
+ * any more. It helps to maintain the error context when an error occurs.
+ * Compared to a simple hang, the system stays stable at least for SSH
+ * access. Then it should be trivial to inspect the hardware state and
+ * see what's going on. Implemented as follows:
+ *
+ * 1. drm_dev_unplug() makes device inaccessible to user space (IOCTLs, etc),
+ * clears all CPU mappings to device, disallows remappings through page faults
+ * 2. amdgpu_irq_disable_all() disables all interrupts
+ * 3. amdgpu_fence_driver_hw_fini() signals all HW fences
+ * 4. set adev->no_hw_access to avoid potential crashes after step 5
+ * 5. amdgpu_device_unmap_mmio() clears all MMIO mappings
+ * 6. pci_disable_device() and pci_wait_for_pending_transaction()
+ * flush any in flight DMA operations
+ */
+void amdgpu_device_halt(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
+ struct pci_dev *pdev = adev->pdev;
+ struct drm_device *ddev = adev_to_drm(adev);
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
+ amdgpu_xcp_dev_unplug(adev);
+ drm_dev_unplug(ddev);
- while (size) {
- uint32_t value;
+ amdgpu_irq_disable_all(adev);
- r = get_user(value, (uint32_t *)buf);
- if (r)
- return r;
+ amdgpu_fence_driver_hw_fini(adev);
- WREG32_SMC(*pos, value);
+ adev->no_hw_access = true;
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
+ amdgpu_device_unmap_mmio(adev);
- return result;
+ pci_disable_device(pdev);
+ pci_wait_for_pending_transaction(pdev);
}
-static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
+u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev,
+ u32 reg)
{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
- uint32_t *config, no_regs = 0;
+ unsigned long flags, address, data;
+ u32 r;
- if (size & 0x3 || *pos & 0x3)
- return -EINVAL;
+ address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
- config = kmalloc_array(256, sizeof(*config), GFP_KERNEL);
- if (!config)
- return -ENOMEM;
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, reg * 4);
+ (void)RREG32(address);
+ r = RREG32(data);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+ return r;
+}
- /* version, increment each time something is added */
- config[no_regs++] = 3;
- config[no_regs++] = adev->gfx.config.max_shader_engines;
- config[no_regs++] = adev->gfx.config.max_tile_pipes;
- config[no_regs++] = adev->gfx.config.max_cu_per_sh;
- config[no_regs++] = adev->gfx.config.max_sh_per_se;
- config[no_regs++] = adev->gfx.config.max_backends_per_se;
- config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
- config[no_regs++] = adev->gfx.config.max_gprs;
- config[no_regs++] = adev->gfx.config.max_gs_threads;
- config[no_regs++] = adev->gfx.config.max_hw_contexts;
- config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
- config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
- config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
- config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
- config[no_regs++] = adev->gfx.config.num_tile_pipes;
- config[no_regs++] = adev->gfx.config.backend_enable_mask;
- config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
- config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
- config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
- config[no_regs++] = adev->gfx.config.num_gpus;
- config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
- config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
- config[no_regs++] = adev->gfx.config.gb_addr_config;
- config[no_regs++] = adev->gfx.config.num_rbs;
-
- /* rev==1 */
- config[no_regs++] = adev->rev_id;
- config[no_regs++] = adev->pg_flags;
- config[no_regs++] = adev->cg_flags;
-
- /* rev==2 */
- config[no_regs++] = adev->family;
- config[no_regs++] = adev->external_rev_id;
-
- /* rev==3 */
- config[no_regs++] = adev->pdev->device;
- config[no_regs++] = adev->pdev->revision;
- config[no_regs++] = adev->pdev->subsystem_device;
- config[no_regs++] = adev->pdev->subsystem_vendor;
-
- while (size && (*pos < no_regs * 4)) {
- uint32_t value;
-
- value = config[*pos >> 2];
- r = put_user(value, (uint32_t *)buf);
- if (r) {
- kfree(config);
- return r;
- }
+void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
+ u32 reg, u32 v)
+{
+ unsigned long flags, address, data;
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
- }
+ address = adev->nbio.funcs->get_pcie_port_index_offset(adev);
+ data = adev->nbio.funcs->get_pcie_port_data_offset(adev);
- kfree(config);
- return result;
+ spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+ WREG32(address, reg * 4);
+ (void)RREG32(address);
+ WREG32(data, v);
+ (void)RREG32(data);
+ spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
-static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
+/**
+ * amdgpu_device_get_gang - return a reference to the current gang
+ * @adev: amdgpu_device pointer
+ *
+ * Returns: A new reference to the current gang leader.
+ */
+struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- int idx, x, outsize, r, valuesize;
- uint32_t values[16];
-
- if (size & 3 || *pos & 0x3)
- return -EINVAL;
-
- if (amdgpu_dpm == 0)
- return -EINVAL;
+ struct dma_fence *fence;
- /* convert offset to sensor number */
- idx = *pos >> 2;
+ rcu_read_lock();
+ fence = dma_fence_get_rcu_safe(&adev->gang_submit);
+ rcu_read_unlock();
+ return fence;
+}
- valuesize = sizeof(values);
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
- r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
- else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
- r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
- &valuesize);
- else
- return -EINVAL;
+/**
+ * amdgpu_device_switch_gang - switch to a new gang
+ * @adev: amdgpu_device pointer
+ * @gang: the gang to switch to
+ *
+ * Try to switch to a new gang.
+ * Returns: NULL if we switched to the new gang or a reference to the current
+ * gang leader.
+ */
+struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
+ struct dma_fence *gang)
+{
+ struct dma_fence *old = NULL;
- if (size > valuesize)
- return -EINVAL;
+ dma_fence_get(gang);
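+	/* Install the new gang leader, retrying if the gang changed under us */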
+ do {
+ dma_fence_put(old);
+ old = amdgpu_device_get_gang(adev);
+ if (old == gang)
+ break;
- outsize = 0;
- x = 0;
- if (!r) {
- while (size) {
- r = put_user(values[x++], (int32_t *)buf);
- buf += 4;
- size -= 4;
- outsize += 4;
+ if (!dma_fence_is_signaled(old)) {
+ dma_fence_put(gang);
+ return old;
}
- }
- return !r ? outsize : r;
+ } while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
+ old, gang) != old);
+
+ /*
+ * Drop it once for the exchanged reference in adev and once for the
+ * thread local reference acquired in amdgpu_device_get_gang().
+ */
+ dma_fence_put(old);
+ dma_fence_put(old);
+ return NULL;
}
-static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
+/**
+ * amdgpu_device_enforce_isolation - enforce HW isolation
+ * @adev: the amdgpu device pointer
+ * @ring: the HW ring the job is supposed to run on
+ * @job: the job which is about to be pushed to the HW ring
+ *
+ * Makes sure that only one client at a time can use the GFX block.
+ * Returns: The dependency to wait on before the job can be pushed to the HW.
+ * The function is called multiple times until NULL is returned.
+ */
+struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ struct amdgpu_job *job)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
- int r, x;
- ssize_t result=0;
- uint32_t offset, se, sh, cu, wave, simd, data[32];
+ struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
+ struct drm_sched_fence *f = job->base.s_fence;
+ struct dma_fence *dep;
+ void *owner;
+ int r;
- if (size & 3 || *pos & 3)
- return -EINVAL;
+ /*
+ * For now enforce isolation only for the GFX block since we only need
+ * the cleaner shader on those rings.
+ */
+ if (ring->funcs->type != AMDGPU_RING_TYPE_GFX &&
+ ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
+ return NULL;
- /* decode offset */
- offset = (*pos & 0x7F);
- se = ((*pos >> 7) & 0xFF);
- sh = ((*pos >> 15) & 0xFF);
- cu = ((*pos >> 23) & 0xFF);
- wave = ((*pos >> 31) & 0xFF);
- simd = ((*pos >> 37) & 0xFF);
+ /*
+ * All submissions where enforce isolation is false are handled as if
+	 * they come from a single client. Use ~0l as the owner to distinguish it
+ * from kernel submissions where the owner is NULL.
+ */
+ owner = job->enforce_isolation ? f->owner : (void *)~0l;
- /* switch to the specific se/sh/cu */
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+ mutex_lock(&adev->enforce_isolation_mutex);
- x = 0;
- if (adev->gfx.funcs->read_wave_data)
- adev->gfx.funcs->read_wave_data(adev, simd, wave, data, &x);
+ /*
+ * The "spearhead" submission is the first one which changes the
+ * ownership to its client. We always need to wait for it to be
+ * pushed to the HW before proceeding with anything.
+ */
+ if (&f->scheduled != isolation->spearhead &&
+ !dma_fence_is_signaled(isolation->spearhead)) {
+ dep = isolation->spearhead;
+ goto out_grab_ref;
+ }
- amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
- mutex_unlock(&adev->grbm_idx_mutex);
+ if (isolation->owner != owner) {
- if (!x)
- return -EINVAL;
+ /*
+ * Wait for any gang to be assembled before switching to a
+ * different owner or otherwise we could deadlock the
+ * submissions.
+ */
+ if (!job->gang_submit) {
+ dep = amdgpu_device_get_gang(adev);
+ if (!dma_fence_is_signaled(dep))
+ goto out_return_dep;
+ dma_fence_put(dep);
+ }
- while (size && (offset < x * 4)) {
- uint32_t value;
+ dma_fence_put(isolation->spearhead);
+ isolation->spearhead = dma_fence_get(&f->scheduled);
+ amdgpu_sync_move(&isolation->active, &isolation->prev);
+ trace_amdgpu_isolation(isolation->owner, owner);
+ isolation->owner = owner;
+ }
- value = data[offset >> 2];
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
+ /*
+ * Specifying the ring here helps to pipeline submissions even when
+	 * isolation is enabled. If that is not desired for testing, NULL can be
+ * used instead of the ring to enforce a CPU round trip while switching
+ * between clients.
+ */
+ dep = amdgpu_sync_peek_fence(&isolation->prev, ring);
+ r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT);
+ if (r)
+ dev_warn(adev->dev, "OOM tracking isolation\n");
- result += 4;
- buf += 4;
- offset += 4;
- size -= 4;
+out_grab_ref:
+ dma_fence_get(dep);
+out_return_dep:
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ return dep;
+}
+
+bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_HAINAN:
+#endif
+ case CHIP_TOPAZ:
+ /* chips with no display hardware */
+ return false;
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+#endif
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ /* chips with display hardware */
+ return true;
+ default:
+ /* IP discovery */
+ if (!amdgpu_ip_version(adev, DCE_HWIP, 0) ||
+ (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
+ return false;
+ return true;
}
+}
- return result;
+uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev,
+ uint32_t inst, uint32_t reg_addr, char reg_name[],
+ uint32_t expected_value, uint32_t mask)
+{
+ uint32_t ret = 0;
+ uint32_t old_ = 0;
+ uint32_t tmp_ = RREG32(reg_addr);
+ uint32_t loop = adev->usec_timeout;
+
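+	/* Restart the timeout whenever the register value changes; only time
+	 * out after usec_timeout polls without any progress.
+	 */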
+ while ((tmp_ & (mask)) != (expected_value)) {
+ if (old_ != tmp_) {
+ loop = adev->usec_timeout;
+ old_ = tmp_;
+ } else
+ udelay(1);
+ tmp_ = RREG32(reg_addr);
+ loop--;
+ if (!loop) {
+ dev_warn(
+ adev->dev,
+ "Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn",
+ inst, reg_name, (uint32_t)expected_value,
+ (uint32_t)(tmp_ & (mask)));
+ ret = -ETIMEDOUT;
+ break;
+ }
+ }
+ return ret;
}
-static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
+ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring)
{
- struct amdgpu_device *adev = f->f_inode->i_private;
- int r;
- ssize_t result = 0;
- uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
+ ssize_t size = 0;
- if (size & 3 || *pos & 3)
- return -EINVAL;
+ if (!ring || !ring->adev)
+ return size;
- /* decode offset */
- offset = (*pos & 0xFFF); /* in dwords */
- se = ((*pos >> 12) & 0xFF);
- sh = ((*pos >> 20) & 0xFF);
- cu = ((*pos >> 28) & 0xFF);
- wave = ((*pos >> 36) & 0xFF);
- simd = ((*pos >> 44) & 0xFF);
- thread = ((*pos >> 52) & 0xFF);
- bank = ((*pos >> 60) & 1);
-
- data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ if (amdgpu_device_should_recover_gpu(ring->adev))
+ size |= AMDGPU_RESET_TYPE_FULL;
- /* switch to the specific se/sh/cu */
- mutex_lock(&adev->grbm_idx_mutex);
- amdgpu_gfx_select_se_sh(adev, se, sh, cu);
+ if (unlikely(!ring->adev->debug_disable_soft_recovery) &&
+ !amdgpu_sriov_vf(ring->adev) && ring->funcs->soft_recovery)
+ size |= AMDGPU_RESET_TYPE_SOFT_RESET;
- if (bank == 0) {
- if (adev->gfx.funcs->read_wave_vgprs)
- adev->gfx.funcs->read_wave_vgprs(adev, simd, wave, thread, offset, size>>2, data);
- } else {
- if (adev->gfx.funcs->read_wave_sgprs)
- adev->gfx.funcs->read_wave_sgprs(adev, simd, wave, offset, size>>2, data);
- }
-
- amdgpu_gfx_select_se_sh(adev, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF);
- mutex_unlock(&adev->grbm_idx_mutex);
+ return size;
+}
- while (size) {
- uint32_t value;
+ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset)
+{
+ ssize_t size = 0;
- value = data[offset++];
- r = put_user(value, (uint32_t *)buf);
- if (r) {
- result = r;
- goto err;
- }
+ if (supported_reset == 0) {
+ size += sysfs_emit_at(buf, size, "unsupported");
+ size += sysfs_emit_at(buf, size, "\n");
+ return size;
- result += 4;
- buf += 4;
- size -= 4;
}
-err:
- kfree(data);
- return result;
-}
+ if (supported_reset & AMDGPU_RESET_TYPE_SOFT_RESET)
+ size += sysfs_emit_at(buf, size, "soft ");
-static const struct file_operations amdgpu_debugfs_regs_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_read,
- .write = amdgpu_debugfs_regs_write,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_regs_didt_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_didt_read,
- .write = amdgpu_debugfs_regs_didt_write,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_regs_pcie_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_pcie_read,
- .write = amdgpu_debugfs_regs_pcie_write,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_regs_smc_read,
- .write = amdgpu_debugfs_regs_smc_write,
- .llseek = default_llseek
-};
+ if (supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)
+ size += sysfs_emit_at(buf, size, "queue ");
-static const struct file_operations amdgpu_debugfs_gca_config_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_gca_config_read,
- .llseek = default_llseek
-};
+ if (supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)
+ size += sysfs_emit_at(buf, size, "pipe ");
-static const struct file_operations amdgpu_debugfs_sensors_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_sensor_read,
- .llseek = default_llseek
-};
-
-static const struct file_operations amdgpu_debugfs_wave_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_wave_read,
- .llseek = default_llseek
-};
-static const struct file_operations amdgpu_debugfs_gpr_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_debugfs_gpr_read,
- .llseek = default_llseek
-};
-
-static const struct file_operations *debugfs_regs[] = {
- &amdgpu_debugfs_regs_fops,
- &amdgpu_debugfs_regs_didt_fops,
- &amdgpu_debugfs_regs_pcie_fops,
- &amdgpu_debugfs_regs_smc_fops,
- &amdgpu_debugfs_gca_config_fops,
- &amdgpu_debugfs_sensors_fops,
- &amdgpu_debugfs_wave_fops,
- &amdgpu_debugfs_gpr_fops,
-};
+ if (supported_reset & AMDGPU_RESET_TYPE_FULL)
+ size += sysfs_emit_at(buf, size, "full ");
-static const char *debugfs_regs_names[] = {
- "amdgpu_regs",
- "amdgpu_regs_didt",
- "amdgpu_regs_pcie",
- "amdgpu_regs_smc",
- "amdgpu_gca_config",
- "amdgpu_sensors",
- "amdgpu_wave",
- "amdgpu_gpr",
-};
+ size += sysfs_emit_at(buf, size, "\n");
+ return size;
+}
-static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
+void amdgpu_device_set_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst,
+ uint64_t uid)
{
- struct drm_minor *minor = adev->ddev->primary;
- struct dentry *ent, *root = minor->debugfs_root;
- unsigned i, j;
+ if (!uid_info)
+ return;
- for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
- ent = debugfs_create_file(debugfs_regs_names[i],
- S_IFREG | S_IRUGO, root,
- adev, debugfs_regs[i]);
- if (IS_ERR(ent)) {
- for (j = 0; j < i; j++) {
- debugfs_remove(adev->debugfs_regs[i]);
- adev->debugfs_regs[i] = NULL;
- }
- return PTR_ERR(ent);
- }
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return;
+ }
- if (!i)
- i_size_write(ent->d_inode, adev->rmmio_size);
- adev->debugfs_regs[i] = ent;
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return;
}
- return 0;
+ if (uid_info->uid[type][inst] != 0) {
+ dev_warn_once(
+ uid_info->adev->dev,
+ "Overwriting existing UID %llu for type %d instance %d\n",
+ uid_info->uid[type][inst], type, inst);
+ }
+
+ uid_info->uid[type][inst] = uid;
}
-static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
+u64 amdgpu_device_get_uid(struct amdgpu_uid *uid_info,
+ enum amdgpu_uid_type type, uint8_t inst)
{
- unsigned i;
+ if (!uid_info)
+ return 0;
- for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
- if (adev->debugfs_regs[i]) {
- debugfs_remove(adev->debugfs_regs[i]);
- adev->debugfs_regs[i] = NULL;
- }
+ if (type >= AMDGPU_UID_TYPE_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID type %d\n",
+ type);
+ return 0;
}
-}
-int amdgpu_debugfs_init(struct drm_minor *minor)
-{
- return 0;
-}
-#else
-static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
-{
- return 0;
+ if (inst >= AMDGPU_UID_INST_MAX) {
+ dev_err_once(uid_info->adev->dev, "Invalid UID instance %d\n",
+ inst);
+ return 0;
+ }
+
+ return uid_info->uid[type][inst];
}
-static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
new file mode 100644
index 000000000000..eb605e79ae0e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DF_H__
+#define __AMDGPU_DF_H__
+
+struct amdgpu_df_hash_status {
+ bool hash_64k;
+ bool hash_2m;
+ bool hash_1g;
+};
+
+struct amdgpu_df_funcs {
+ void (*sw_init)(struct amdgpu_device *adev);
+ void (*sw_fini)(struct amdgpu_device *adev);
+ void (*hw_init)(struct amdgpu_device *adev);
+ void (*enable_broadcast_mode)(struct amdgpu_device *adev,
+ bool enable);
+ u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
+ u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
+ void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u64 *flags);
+ void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
+ bool enable);
+ int (*pmc_start)(struct amdgpu_device *adev, uint64_t config,
+ int counter_idx, int is_add);
+ int (*pmc_stop)(struct amdgpu_device *adev, uint64_t config,
+ int counter_idx, int is_remove);
+ void (*pmc_get_count)(struct amdgpu_device *adev, uint64_t config,
+ int counter_idx, uint64_t *count);
+ uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
+ void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
+ uint32_t ficadl_val, uint32_t ficadh_val);
+ bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_df {
+ struct amdgpu_df_hash_status hash_status;
+ const struct amdgpu_df_funcs *funcs;
+};
+
+#endif /* __AMDGPU_DF_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
new file mode 100644
index 000000000000..73401f0aeb34
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -0,0 +1,3157 @@
+/*
+ * Copyright 2018-2024 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+
+#include "amdgpu.h"
+#include "amdgpu_discovery.h"
+#include "soc15_hw_ip.h"
+#include "discovery.h"
+#include "amdgpu_ras.h"
+
+#include "soc15.h"
+#include "gfx_v9_0.h"
+#include "gfx_v9_4_3.h"
+#include "gmc_v9_0.h"
+#include "df_v1_7.h"
+#include "df_v3_6.h"
+#include "df_v4_3.h"
+#include "df_v4_6_2.h"
+#include "df_v4_15.h"
+#include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
+#include "nbio_v7_4.h"
+#include "nbio_v7_9.h"
+#include "nbio_v7_11.h"
+#include "hdp_v4_0.h"
+#include "vega10_ih.h"
+#include "vega20_ih.h"
+#include "sdma_v4_0.h"
+#include "sdma_v4_4_2.h"
+#include "uvd_v7_0.h"
+#include "vce_v4_0.h"
+#include "vcn_v1_0.h"
+#include "vcn_v2_5.h"
+#include "jpeg_v2_5.h"
+#include "smuio_v9_0.h"
+#include "gmc_v10_0.h"
+#include "gmc_v11_0.h"
+#include "gmc_v12_0.h"
+#include "gfxhub_v2_0.h"
+#include "mmhub_v2_0.h"
+#include "nbio_v2_3.h"
+#include "nbio_v4_3.h"
+#include "nbio_v7_2.h"
+#include "nbio_v7_7.h"
+#include "nbif_v6_3_1.h"
+#include "hdp_v5_0.h"
+#include "hdp_v5_2.h"
+#include "hdp_v6_0.h"
+#include "hdp_v7_0.h"
+#include "nv.h"
+#include "soc21.h"
+#include "soc24.h"
+#include "navi10_ih.h"
+#include "ih_v6_0.h"
+#include "ih_v6_1.h"
+#include "ih_v7_0.h"
+#include "gfx_v10_0.h"
+#include "gfx_v11_0.h"
+#include "gfx_v12_0.h"
+#include "sdma_v5_0.h"
+#include "sdma_v5_2.h"
+#include "sdma_v6_0.h"
+#include "sdma_v7_0.h"
+#include "lsdma_v6_0.h"
+#include "lsdma_v7_0.h"
+#include "vcn_v2_0.h"
+#include "jpeg_v2_0.h"
+#include "vcn_v3_0.h"
+#include "jpeg_v3_0.h"
+#include "vcn_v4_0.h"
+#include "jpeg_v4_0.h"
+#include "vcn_v4_0_3.h"
+#include "jpeg_v4_0_3.h"
+#include "vcn_v4_0_5.h"
+#include "jpeg_v4_0_5.h"
+#include "amdgpu_vkms.h"
+#include "mes_v11_0.h"
+#include "mes_v12_0.h"
+#include "smuio_v11_0.h"
+#include "smuio_v11_0_6.h"
+#include "smuio_v13_0.h"
+#include "smuio_v13_0_3.h"
+#include "smuio_v13_0_6.h"
+#include "smuio_v14_0_2.h"
+#include "vcn_v5_0_0.h"
+#include "vcn_v5_0_1.h"
+#include "jpeg_v5_0_0.h"
+#include "jpeg_v5_0_1.h"
+
+#include "amdgpu_vpe.h"
+#if defined(CONFIG_DRM_AMD_ISP)
+#include "amdgpu_isp.h"
+#endif
+
+MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
+
+#define mmIP_DISCOVERY_VERSION 0x16A00
+#define mmRCC_CONFIG_MEMSIZE 0xde3
+#define mmMP0_SMN_C2PMSG_33 0x16061
+#define mmMM_INDEX 0x0
+#define mmMM_INDEX_HI 0x6
+#define mmMM_DATA 0x1
+
+static const char *hw_id_names[HW_ID_MAX] = {
+ [MP1_HWID] = "MP1",
+ [MP2_HWID] = "MP2",
+ [THM_HWID] = "THM",
+ [SMUIO_HWID] = "SMUIO",
+ [FUSE_HWID] = "FUSE",
+ [CLKA_HWID] = "CLKA",
+ [PWR_HWID] = "PWR",
+ [GC_HWID] = "GC",
+ [UVD_HWID] = "UVD",
+ [AUDIO_AZ_HWID] = "AUDIO_AZ",
+ [ACP_HWID] = "ACP",
+ [DCI_HWID] = "DCI",
+ [DMU_HWID] = "DMU",
+ [DCO_HWID] = "DCO",
+ [DIO_HWID] = "DIO",
+ [XDMA_HWID] = "XDMA",
+ [DCEAZ_HWID] = "DCEAZ",
+ [DAZ_HWID] = "DAZ",
+ [SDPMUX_HWID] = "SDPMUX",
+ [NTB_HWID] = "NTB",
+ [IOHC_HWID] = "IOHC",
+ [L2IMU_HWID] = "L2IMU",
+ [VCE_HWID] = "VCE",
+ [MMHUB_HWID] = "MMHUB",
+ [ATHUB_HWID] = "ATHUB",
+ [DBGU_NBIO_HWID] = "DBGU_NBIO",
+ [DFX_HWID] = "DFX",
+ [DBGU0_HWID] = "DBGU0",
+ [DBGU1_HWID] = "DBGU1",
+ [OSSSYS_HWID] = "OSSSYS",
+ [HDP_HWID] = "HDP",
+ [SDMA0_HWID] = "SDMA0",
+ [SDMA1_HWID] = "SDMA1",
+ [SDMA2_HWID] = "SDMA2",
+ [SDMA3_HWID] = "SDMA3",
+ [LSDMA_HWID] = "LSDMA",
+ [ISP_HWID] = "ISP",
+ [DBGU_IO_HWID] = "DBGU_IO",
+ [DF_HWID] = "DF",
+ [CLKB_HWID] = "CLKB",
+ [FCH_HWID] = "FCH",
+ [DFX_DAP_HWID] = "DFX_DAP",
+ [L1IMU_PCIE_HWID] = "L1IMU_PCIE",
+ [L1IMU_NBIF_HWID] = "L1IMU_NBIF",
+ [L1IMU_IOAGR_HWID] = "L1IMU_IOAGR",
+ [L1IMU3_HWID] = "L1IMU3",
+ [L1IMU4_HWID] = "L1IMU4",
+ [L1IMU5_HWID] = "L1IMU5",
+ [L1IMU6_HWID] = "L1IMU6",
+ [L1IMU7_HWID] = "L1IMU7",
+ [L1IMU8_HWID] = "L1IMU8",
+ [L1IMU9_HWID] = "L1IMU9",
+ [L1IMU10_HWID] = "L1IMU10",
+ [L1IMU11_HWID] = "L1IMU11",
+ [L1IMU12_HWID] = "L1IMU12",
+ [L1IMU13_HWID] = "L1IMU13",
+ [L1IMU14_HWID] = "L1IMU14",
+ [L1IMU15_HWID] = "L1IMU15",
+ [WAFLC_HWID] = "WAFLC",
+ [FCH_USB_PD_HWID] = "FCH_USB_PD",
+ [PCIE_HWID] = "PCIE",
+ [PCS_HWID] = "PCS",
+ [DDCL_HWID] = "DDCL",
+ [SST_HWID] = "SST",
+ [IOAGR_HWID] = "IOAGR",
+ [NBIF_HWID] = "NBIF",
+ [IOAPIC_HWID] = "IOAPIC",
+ [SYSTEMHUB_HWID] = "SYSTEMHUB",
+ [NTBCCP_HWID] = "NTBCCP",
+ [UMC_HWID] = "UMC",
+ [SATA_HWID] = "SATA",
+ [USB_HWID] = "USB",
+ [CCXSEC_HWID] = "CCXSEC",
+ [XGMI_HWID] = "XGMI",
+ [XGBE_HWID] = "XGBE",
+ [MP0_HWID] = "MP0",
+ [VPE_HWID] = "VPE",
+};
+
+static int hw_id_map[MAX_HWIP] = {
+ [GC_HWIP] = GC_HWID,
+ [HDP_HWIP] = HDP_HWID,
+ [SDMA0_HWIP] = SDMA0_HWID,
+ [SDMA1_HWIP] = SDMA1_HWID,
+ [SDMA2_HWIP] = SDMA2_HWID,
+ [SDMA3_HWIP] = SDMA3_HWID,
+ [LSDMA_HWIP] = LSDMA_HWID,
+ [MMHUB_HWIP] = MMHUB_HWID,
+ [ATHUB_HWIP] = ATHUB_HWID,
+ [NBIO_HWIP] = NBIF_HWID,
+ [MP0_HWIP] = MP0_HWID,
+ [MP1_HWIP] = MP1_HWID,
+ [UVD_HWIP] = UVD_HWID,
+ [VCE_HWIP] = VCE_HWID,
+ [DF_HWIP] = DF_HWID,
+ [DCE_HWIP] = DMU_HWID,
+ [OSSSYS_HWIP] = OSSSYS_HWID,
+ [SMUIO_HWIP] = SMUIO_HWID,
+ [PWR_HWIP] = PWR_HWID,
+ [NBIF_HWIP] = NBIF_HWID,
+ [THM_HWIP] = THM_HWID,
+ [CLK_HWIP] = CLKA_HWID,
+ [UMC_HWIP] = UMC_HWID,
+ [XGMI_HWIP] = XGMI_HWID,
+ [DCI_HWIP] = DCI_HWID,
+ [PCIE_HWIP] = PCIE_HWID,
+ [VPE_HWIP] = VPE_HWID,
+ [ISP_HWIP] = ISP_HWID,
+};
+
+static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
+{
+ u64 tmr_offset, tmr_size, pos;
+ void *discv_regn;
+ int ret;
+
+ ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
+ if (ret)
+ return ret;
+
+ pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
+
+ /* This region is read-only and reserved from system use */
+ discv_regn = memremap(pos, adev->mman.discovery_tmr_size, MEMREMAP_WC);
+ if (discv_regn) {
+ memcpy(binary, discv_regn, adev->mman.discovery_tmr_size);
+ memunmap(discv_regn);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+#define IP_DISCOVERY_V2 2
+#define IP_DISCOVERY_V4 4
+
+static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ uint8_t *binary)
+{
+ bool sz_valid = true;
+ uint64_t vram_size;
+ int i, ret = 0;
+ u32 msg;
+
+ if (!amdgpu_sriov_vf(adev)) {
+		/* It can take up to two seconds for IFWI init to complete on some dGPUs,
+ * but generally it should be in the 60-100ms range. Normally this starts
+ * as soon as the device gets power so by the time the OS loads this has long
+ * completed. However, when a card is hotplugged via e.g., USB4, we need to
+ * wait for this to complete. Once the C2PMSG is updated, we can
+ * continue.
+ */
+
+ for (i = 0; i < 2000; i++) {
+ msg = RREG32(mmMP0_SMN_C2PMSG_33);
+ if (msg & 0x80000000)
+ break;
+ msleep(1);
+ }
+ }
+
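+	/* The discovery table sits DISCOVERY_TMR_OFFSET below the top of VRAM.
+	 * If the reported VRAM size is invalid, fall back to the copy reserved
+	 * in system memory.
+	 */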
+ vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
+ if (!vram_size || vram_size == U32_MAX)
+ sz_valid = false;
+ else
+ vram_size <<= 20;
+
+ if (sz_valid) {
+ uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+ adev->mman.discovery_tmr_size, false);
+ } else {
+ ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
+ }
+
+ if (ret)
+ dev_err(adev->dev,
+ "failed to read discovery info from memory, vram size read: %llx",
+ vram_size);
+
+ return ret;
+}
+
+static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
+ uint8_t *binary,
+ const char *fw_name)
+{
+ const struct firmware *fw;
+ int r;
+
+ r = firmware_request_nowarn(&fw, fw_name, adev->dev);
+ if (r) {
+ if (amdgpu_discovery == 2)
+ dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
+ else
+ drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
+ return r;
+ }
+
+ memcpy((u8 *)binary, (u8 *)fw->data, fw->size);
+ release_firmware(fw);
+
+ return 0;
+}
+
+static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
+{
+ uint16_t checksum = 0;
+ int i;
+
+ for (i = 0; i < size; i++)
+ checksum += data[i];
+
+ return checksum;
+}
+
+static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
+ uint16_t expected)
+{
+ return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
+}
+
+static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
+{
+ struct binary_header *bhdr;
+ bhdr = (struct binary_header *)binary;
+
+ return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
+}
+
+static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
+{
+ /*
+ * So far, apply this quirk only on those Navy Flounder boards which
+ * have a bad harvest table of VCN config.
+ */
+ if ((amdgpu_ip_version(adev, UVD_HWIP, 1) == IP_VERSION(3, 0, 1)) &&
+ (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 2))) {
+ switch (adev->pdev->revision) {
+ case 0xC1:
+ case 0xC2:
+ case 0xC3:
+ case 0xC5:
+ case 0xC7:
+ case 0xCF:
+ case 0xDF:
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ adev->vcn.inst_mask &= ~AMDGPU_VCN_HARVEST_VCN1;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
+ struct binary_header *bhdr)
+{
+ struct table_info *info;
+ uint16_t checksum;
+ uint16_t offset;
+
+ info = &bhdr->table_list[NPS_INFO];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ struct nps_info_header *nhdr =
+ (struct nps_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(nhdr->table_id) != NPS_INFO_TABLE_ID) {
+ dev_dbg(adev->dev, "invalid ip discovery nps info table id\n");
+ return -EINVAL;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le32_to_cpu(nhdr->size_bytes),
+ checksum)) {
+ dev_dbg(adev->dev, "invalid nps info data table checksum\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
+{
+ if (amdgpu_discovery == 2)
+ return "amdgpu/ip_discovery.bin";
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ return "amdgpu/vega10_ip_discovery.bin";
+ case CHIP_VEGA12:
+ return "amdgpu/vega12_ip_discovery.bin";
+ case CHIP_RAVEN:
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "amdgpu/raven2_ip_discovery.bin";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "amdgpu/picasso_ip_discovery.bin";
+ else
+ return "amdgpu/raven_ip_discovery.bin";
+ case CHIP_VEGA20:
+ return "amdgpu/vega20_ip_discovery.bin";
+ case CHIP_ARCTURUS:
+ return "amdgpu/arcturus_ip_discovery.bin";
+ case CHIP_ALDEBARAN:
+ return "amdgpu/aldebaran_ip_discovery.bin";
+ default:
+ return NULL;
+ }
+}
+
+static int amdgpu_discovery_init(struct amdgpu_device *adev)
+{
+ struct table_info *info;
+ struct binary_header *bhdr;
+ const char *fw_name;
+ uint16_t offset;
+ uint16_t size;
+ uint16_t checksum;
+ int r;
+
+ adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
+ adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
+ if (!adev->mman.discovery_bin)
+ return -ENOMEM;
+
+ /* Read from file if it is the preferred option */
+ fw_name = amdgpu_discovery_get_fw_name(adev);
+ if (fw_name != NULL) {
+ drm_dbg(&adev->ddev, "use ip discovery information from file");
+ r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
+ if (r)
+ goto out;
+ } else {
+ drm_dbg(&adev->ddev, "use ip discovery information from memory");
+ r = amdgpu_discovery_read_binary_from_mem(
+ adev, adev->mman.discovery_bin);
+ if (r)
+ goto out;
+ }
+
+ /* check the ip discovery binary signature */
+ if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
+ dev_err(adev->dev,
+ "get invalid ip discovery binary signature\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+
+ offset = offsetof(struct binary_header, binary_checksum) +
+ sizeof(bhdr->binary_checksum);
+ size = le16_to_cpu(bhdr->binary_size) - offset;
+ checksum = le16_to_cpu(bhdr->binary_checksum);
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ size, checksum)) {
+ dev_err(adev->dev, "invalid ip discovery binary checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ info = &bhdr->table_list[IP_DISCOVERY];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct ip_discovery_header *ihdr =
+ (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
+ if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
+ dev_err(adev->dev, "invalid ip discovery data table signature\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le16_to_cpu(ihdr->size), checksum)) {
+ dev_err(adev->dev, "invalid ip discovery data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[GC];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct gpu_info_header *ghdr =
+ (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
+ dev_err(adev->dev, "invalid ip discovery gc table id\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le32_to_cpu(ghdr->size), checksum)) {
+ dev_err(adev->dev, "invalid gc data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[HARVEST_INFO];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct harvest_info_header *hhdr =
+ (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
+ dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ sizeof(struct harvest_table), checksum)) {
+ dev_err(adev->dev, "invalid harvest data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[VCN_INFO];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ if (offset) {
+ struct vcn_info_header *vhdr =
+ (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
+ dev_err(adev->dev, "invalid ip discovery vcn table id\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le32_to_cpu(vhdr->size_bytes), checksum)) {
+ dev_err(adev->dev, "invalid vcn data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ info = &bhdr->table_list[MALL_INFO];
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+	if (offset) {
+ struct mall_info_header *mhdr =
+ (struct mall_info_header *)(adev->mman.discovery_bin + offset);
+
+ if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
+ dev_err(adev->dev, "invalid ip discovery mall table id\n");
+ r = -EINVAL;
+ goto out;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
+ le32_to_cpu(mhdr->size_bytes), checksum)) {
+ dev_err(adev->dev, "invalid mall data table checksum\n");
+ r = -EINVAL;
+ goto out;
+ }
+ }
+
+ return 0;
+
+out:
+ kfree(adev->mman.discovery_bin);
+ adev->mman.discovery_bin = NULL;
+ if ((amdgpu_discovery != 2) &&
+ (RREG32(mmIP_DISCOVERY_VERSION) == 4))
+ amdgpu_ras_query_boot_status(adev, 4);
+ return r;
+}
+
+static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
+
+void amdgpu_discovery_fini(struct amdgpu_device *adev)
+{
+ amdgpu_discovery_sysfs_fini(adev);
+ kfree(adev->mman.discovery_bin);
+ adev->mman.discovery_bin = NULL;
+}
+
+static int amdgpu_discovery_validate_ip(struct amdgpu_device *adev,
+ uint8_t instance, uint16_t hw_id)
+{
+ if (instance >= HWIP_MAX_INSTANCE) {
+ dev_err(adev->dev,
+ "Unexpected instance_number (%d) from ip discovery blob\n",
+ instance);
+ return -EINVAL;
+ }
+ if (hw_id >= HW_ID_MAX) {
+ dev_err(adev->dev,
+ "Unexpected hw_id (%d) from ip discovery blob\n",
+ hw_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
+ uint32_t *vcn_harvest_count)
+{
+ struct binary_header *bhdr;
+ struct ip_discovery_header *ihdr;
+ struct die_header *dhdr;
+ struct ip *ip;
+ uint16_t die_offset, ip_offset, num_dies, num_ips;
+ uint16_t hw_id;
+ uint8_t inst;
+ int i, j;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ num_dies = le16_to_cpu(ihdr->num_dies);
+
+ /* scan harvest bit of all IP data structures */
+ for (i = 0; i < num_dies; i++) {
+ die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ num_ips = le16_to_cpu(dhdr->num_ips);
+ ip_offset = die_offset + sizeof(*dhdr);
+
+ for (j = 0; j < num_ips; j++) {
+ ip = (struct ip *)(adev->mman.discovery_bin +
+ ip_offset);
+ inst = ip->number_instance;
+ hw_id = le16_to_cpu(ip->hw_id);
+ if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
+ goto next_ip;
+
+ if (ip->harvest == 1) {
+ switch (hw_id) {
+ case VCN_HWID:
+ (*vcn_harvest_count)++;
+ if (inst == 0) {
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
+ adev->vcn.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN0;
+ adev->jpeg.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN0;
+ } else {
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ adev->vcn.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN1;
+ adev->jpeg.inst_mask &=
+ ~AMDGPU_VCN_HARVEST_VCN1;
+ }
+ break;
+ case DMU_HWID:
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ break;
+ default:
+ break;
+ }
+ }
+next_ip:
+ ip_offset += struct_size(ip, base_address,
+ ip->num_base_address);
+ }
+ }
+}
+
+static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
+ uint32_t *vcn_harvest_count,
+ uint32_t *umc_harvest_count)
+{
+ struct binary_header *bhdr;
+ struct harvest_table *harvest_info;
+ u16 offset;
+ int i;
+ uint32_t umc_harvest_config = 0;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
+
+ if (!offset) {
+ dev_err(adev->dev, "invalid harvest table offset\n");
+ return;
+ }
+
+ harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
+
+ for (i = 0; i < 32; i++) {
+ if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
+ break;
+
+ switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
+ case VCN_HWID:
+ (*vcn_harvest_count)++;
+ adev->vcn.harvest_config |=
+ (1 << harvest_info->list[i].number_instance);
+ adev->jpeg.harvest_config |=
+ (1 << harvest_info->list[i].number_instance);
+
+ adev->vcn.inst_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ adev->jpeg.inst_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ break;
+ case DMU_HWID:
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ break;
+ case UMC_HWID:
+ umc_harvest_config |=
+ 1 << (le16_to_cpu(harvest_info->list[i].number_instance));
+ (*umc_harvest_count)++;
+ break;
+ case GC_HWID:
+ adev->gfx.xcc_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ break;
+ case SDMA0_HWID:
+ adev->sdma.sdma_mask &=
+ ~(1U << harvest_info->list[i].number_instance);
+ break;
+#if defined(CONFIG_DRM_AMD_ISP)
+ case ISP_HWID:
+ adev->isp.harvest_config |=
+ ~(1U << harvest_info->list[i].number_instance);
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+
+ adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
+ ~umc_harvest_config;
+}
+
+/* ================================================== */
+
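+/* The structures below back the sysfs view of the IP discovery table, rooted
+ * at ip_discovery/die/#die/#hw_id/#instance/<attrs...> under the device's
+ * sysfs directory.
+ */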
+struct ip_hw_instance {
+ struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */
+
+ int hw_id;
+ u8 num_instance;
+ u8 major, minor, revision;
+ u8 harvest;
+
+ int num_base_addresses;
+ u32 base_addr[] __counted_by(num_base_addresses);
+};
+
+struct ip_hw_id {
+ struct kset hw_id_kset; /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
+ int hw_id;
+};
+
+struct ip_die_entry {
+ struct kset ip_kset; /* ip_discovery/die/#die/, contains ip_hw_id */
+ u16 num_ips;
+};
+
+/* -------------------------------------------------- */
+
+struct ip_hw_instance_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
+};
+
+static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
+}
+
+static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
+}
+
+static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
+}
+
+static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
+}
+
+static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
+}
+
+static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
+}
+
+static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
+}
+
+static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
+{
+ ssize_t res, at;
+ int ii;
+
+ for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
+ /* Each entry emits "0x%08X\n" (11 characters plus NUL, i.e. 12 bytes);
+ * stop before at + size would exceed PAGE_SIZE.
+ */
+ if (at + 12 > PAGE_SIZE)
+ break;
+ res = sysfs_emit_at(buf, at, "0x%08X\n",
+ ip_hw_instance->base_addr[ii]);
+ if (res <= 0)
+ break;
+ at += res;
+ }
+
+ return res < 0 ? res : at;
+}
+
+static struct ip_hw_instance_attr ip_hw_attr[] = {
+ __ATTR_RO(hw_id),
+ __ATTR_RO(num_instance),
+ __ATTR_RO(major),
+ __ATTR_RO(minor),
+ __ATTR_RO(revision),
+ __ATTR_RO(harvest),
+ __ATTR_RO(num_base_addresses),
+ __ATTR_RO(base_addr),
+};
+
+static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
+ATTRIBUTE_GROUPS(ip_hw_instance);
+
+#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
+#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
+
+static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
+ struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
+
+ if (!ip_hw_attr->show)
+ return -EIO;
+
+ return ip_hw_attr->show(ip_hw_instance, buf);
+}
+
+static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
+ .show = ip_hw_instance_attr_show,
+};
+
+static void ip_hw_instance_release(struct kobject *kobj)
+{
+ struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
+
+ kfree(ip_hw_instance);
+}
+
+static const struct kobj_type ip_hw_instance_ktype = {
+ .release = ip_hw_instance_release,
+ .sysfs_ops = &ip_hw_instance_sysfs_ops,
+ .default_groups = ip_hw_instance_groups,
+};
+
+/* -------------------------------------------------- */
+
+#define to_ip_hw_id(x) container_of(to_kset(x), struct ip_hw_id, hw_id_kset)
+
+static void ip_hw_id_release(struct kobject *kobj)
+{
+ struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);
+
+ if (!list_empty(&ip_hw_id->hw_id_kset.list))
+ DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
+ kfree(ip_hw_id);
+}
+
+static const struct kobj_type ip_hw_id_ktype = {
+ .release = ip_hw_id_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+/* -------------------------------------------------- */
+
+static void die_kobj_release(struct kobject *kobj);
+static void ip_disc_release(struct kobject *kobj);
+
+struct ip_die_entry_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
+};
+
+#define to_ip_die_entry_attr(x) container_of(x, struct ip_die_entry_attribute, attr)
+
+static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
+}
+
+/* If there are more ip_die_entry attrs than just the number of IPs, we can
+ * make this into an array of attrs and then initialize ip_die_entry_attrs
+ * in a loop.
+ */
+static struct ip_die_entry_attribute num_ips_attr =
+ __ATTR_RO(num_ips);
+
+static struct attribute *ip_die_entry_attrs[] = {
+ &num_ips_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */
+
+#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
+
+static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
+ struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
+
+ if (!ip_die_entry_attr->show)
+ return -EIO;
+
+ return ip_die_entry_attr->show(ip_die_entry, buf);
+}
+
+static void ip_die_entry_release(struct kobject *kobj)
+{
+ struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
+
+ if (!list_empty(&ip_die_entry->ip_kset.list))
+ DRM_ERROR("ip_die_entry->ip_kset is not empty");
+ kfree(ip_die_entry);
+}
+
+static const struct sysfs_ops ip_die_entry_sysfs_ops = {
+ .show = ip_die_entry_attr_show,
+};
+
+static const struct kobj_type ip_die_entry_ktype = {
+ .release = ip_die_entry_release,
+ .sysfs_ops = &ip_die_entry_sysfs_ops,
+ .default_groups = ip_die_entry_groups,
+};
+
+static const struct kobj_type die_kobj_ktype = {
+ .release = die_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static const struct kobj_type ip_discovery_ktype = {
+ .release = ip_disc_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+struct ip_discovery_top {
+ struct kobject kobj; /* ip_discovery/ */
+ struct kset die_kset; /* ip_discovery/die/, contains ip_die_entry */
+ struct amdgpu_device *adev;
+};
+
+static void die_kobj_release(struct kobject *kobj)
+{
+ struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
+ struct ip_discovery_top,
+ die_kset);
+ if (!list_empty(&ip_top->die_kset.list))
+ DRM_ERROR("ip_top->die_kset is not empty");
+}
+
+static void ip_disc_release(struct kobject *kobj)
+{
+ struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
+ kobj);
+ struct amdgpu_device *adev = ip_top->adev;
+
+ adev->ip_top = NULL;
+ kfree(ip_top);
+}
+
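+/* Report whether the given hw_id/instance pair has been harvested, based on
+ * the instance masks populated from the discovery and harvest tables.
+ */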
+static uint8_t amdgpu_discovery_get_harvest_info(struct amdgpu_device *adev,
+ uint16_t hw_id, uint8_t inst)
+{
+ uint8_t harvest = 0;
+
+ /* Until a uniform way is figured out, derive the mask from the hw_id */
+ switch (hw_id) {
+ case VCN_HWID:
+ harvest = ((1 << inst) & adev->vcn.inst_mask) == 0;
+ break;
+ case DMU_HWID:
+ if (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)
+ harvest = 0x1;
+ break;
+ case UMC_HWID:
+ /* TODO: UMC harvest info needs separate parsing; ignore it for now. */
+ break;
+ case GC_HWID:
+ harvest = ((1 << inst) & adev->gfx.xcc_mask) == 0;
+ break;
+ case SDMA0_HWID:
+ harvest = ((1 << inst) & adev->sdma.sdma_mask) == 0;
+ break;
+ default:
+ break;
+ }
+
+ return harvest;
+}
+
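+/* Populate the #hw_id/#instance sysfs hierarchy for a single die. */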
+static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
+ struct ip_die_entry *ip_die_entry,
+ const size_t _ip_offset, const int num_ips,
+ bool reg_base_64)
+{
+ int ii, jj, kk, res;
+ uint16_t hw_id;
+ uint8_t inst;
+
+ DRM_DEBUG("num_ips:%d", num_ips);
+
+ /* Find all IPs of a given HW ID, and add their instance to
+ * #die/#hw_id/#instance/<attributes>
+ */
+ for (ii = 0; ii < HW_ID_MAX; ii++) {
+ struct ip_hw_id *ip_hw_id = NULL;
+ size_t ip_offset = _ip_offset;
+
+ for (jj = 0; jj < num_ips; jj++) {
+ struct ip_v4 *ip;
+ struct ip_hw_instance *ip_hw_instance;
+
+ ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+ inst = ip->instance_number;
+ hw_id = le16_to_cpu(ip->hw_id);
+ if (amdgpu_discovery_validate_ip(adev, inst, hw_id) ||
+ hw_id != ii)
+ goto next_ip;
+
+ DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
+
+ /* We have a hw_id match; register the hw
+ * block if not yet registered.
+ */
+ if (!ip_hw_id) {
+ ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
+ if (!ip_hw_id)
+ return -ENOMEM;
+ ip_hw_id->hw_id = ii;
+
+ kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
+ ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
+ ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
+ res = kset_register(&ip_hw_id->hw_id_kset);
+ if (res) {
+ DRM_ERROR("Couldn't register ip_hw_id kset");
+ kfree(ip_hw_id);
+ return res;
+ }
+ if (hw_id_names[ii]) {
+ res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
+ &ip_hw_id->hw_id_kset.kobj,
+ hw_id_names[ii]);
+ if (res) {
+ DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
+ hw_id_names[ii],
+ kobject_name(&ip_die_entry->ip_kset.kobj));
+ }
+ }
+ }
+
+ /* Now register its instance.
+ */
+ ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
+ base_addr,
+ ip->num_base_address),
+ GFP_KERNEL);
+ if (!ip_hw_instance) {
+ DRM_ERROR("no memory for ip_hw_instance");
+ return -ENOMEM;
+ }
+ ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
+ ip_hw_instance->num_instance = ip->instance_number;
+ ip_hw_instance->major = ip->major;
+ ip_hw_instance->minor = ip->minor;
+ ip_hw_instance->revision = ip->revision;
+ ip_hw_instance->harvest =
+ amdgpu_discovery_get_harvest_info(
+ adev, ip_hw_instance->hw_id,
+ ip_hw_instance->num_instance);
+ ip_hw_instance->num_base_addresses = ip->num_base_address;
+
+ for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
+ if (reg_base_64)
+ ip_hw_instance->base_addr[kk] =
+ lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
+ else
+ ip_hw_instance->base_addr[kk] = ip->base_address[kk];
+ }
+
+ kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
+ ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
+ res = kobject_add(&ip_hw_instance->kobj, NULL,
+ "%d", ip_hw_instance->num_instance);
+next_ip:
+ if (reg_base_64)
+ ip_offset += struct_size(ip, base_address_64,
+ ip->num_base_address);
+ else
+ ip_offset += struct_size(ip, base_address,
+ ip->num_base_address);
+ }
+ }
+
+ return 0;
+}
+
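+/* Register one ip_die_entry kset per die and populate its IP entries. */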
+static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ struct ip_discovery_header *ihdr;
+ struct die_header *dhdr;
+ struct kset *die_kset = &adev->ip_top->die_kset;
+ u16 num_dies, die_offset, num_ips;
+ size_t ip_offset;
+ int ii, res;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ num_dies = le16_to_cpu(ihdr->num_dies);
+
+ DRM_DEBUG("number of dies: %d\n", num_dies);
+
+ for (ii = 0; ii < num_dies; ii++) {
+ struct ip_die_entry *ip_die_entry;
+
+ die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ num_ips = le16_to_cpu(dhdr->num_ips);
+ ip_offset = die_offset + sizeof(*dhdr);
+
+ /* Add the die to the kset.
+ *
+ * dhdr->die_id == ii, which was checked in
+ * amdgpu_discovery_reg_base_init().
+ */
+
+ ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
+ if (!ip_die_entry)
+ return -ENOMEM;
+
+ ip_die_entry->num_ips = num_ips;
+
+ kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
+ ip_die_entry->ip_kset.kobj.kset = die_kset;
+ ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
+ res = kset_register(&ip_die_entry->ip_kset);
+ if (res) {
+ DRM_ERROR("Couldn't register ip_die_entry kset");
+ kfree(ip_die_entry);
+ return res;
+ }
+
+ amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips, !!ihdr->base_addr_64_bit);
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
+{
+ struct kset *die_kset;
+ int res, ii;
+
+ if (!adev->mman.discovery_bin)
+ return -EINVAL;
+
+ adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
+ if (!adev->ip_top)
+ return -ENOMEM;
+
+ adev->ip_top->adev = adev;
+
+ res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
+ &adev->dev->kobj, "ip_discovery");
+ if (res) {
+ DRM_ERROR("Couldn't init and add ip_discovery/");
+ goto Err;
+ }
+
+ die_kset = &adev->ip_top->die_kset;
+ kobject_set_name(&die_kset->kobj, "%s", "die");
+ die_kset->kobj.parent = &adev->ip_top->kobj;
+ die_kset->kobj.ktype = &die_kobj_ktype;
+ res = kset_register(&adev->ip_top->die_kset);
+ if (res) {
+ DRM_ERROR("Couldn't register die_kset");
+ goto Err;
+ }
+
+ for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
+ ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
+ ip_hw_instance_attrs[ii] = NULL;
+
+ res = amdgpu_discovery_sysfs_recurse(adev);
+
+ return res;
+Err:
+ kobject_put(&adev->ip_top->kobj);
+ return res;
+}
+
+/* -------------------------------------------------- */
+
+#define list_to_kobj(el) container_of(el, struct kobject, entry)
+
+static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
+{
+ struct list_head *el, *tmp;
+ struct kset *hw_id_kset;
+
+ hw_id_kset = &ip_hw_id->hw_id_kset;
+ spin_lock(&hw_id_kset->list_lock);
+ list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
+ list_del_init(el);
+ spin_unlock(&hw_id_kset->list_lock);
+ /* kobject is embedded in ip_hw_instance */
+ kobject_put(list_to_kobj(el));
+ spin_lock(&hw_id_kset->list_lock);
+ }
+ spin_unlock(&hw_id_kset->list_lock);
+ kobject_put(&ip_hw_id->hw_id_kset.kobj);
+}
+
+static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
+{
+ struct list_head *el, *tmp;
+ struct kset *ip_kset;
+
+ ip_kset = &ip_die_entry->ip_kset;
+ spin_lock(&ip_kset->list_lock);
+ list_for_each_prev_safe(el, tmp, &ip_kset->list) {
+ list_del_init(el);
+ spin_unlock(&ip_kset->list_lock);
+ amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
+ spin_lock(&ip_kset->list_lock);
+ }
+ spin_unlock(&ip_kset->list_lock);
+ kobject_put(&ip_die_entry->ip_kset.kobj);
+}
+
+static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
+{
+ struct list_head *el, *tmp;
+ struct kset *die_kset;
+
+ die_kset = &adev->ip_top->die_kset;
+ spin_lock(&die_kset->list_lock);
+ list_for_each_prev_safe(el, tmp, &die_kset->list) {
+ list_del_init(el);
+ spin_unlock(&die_kset->list_lock);
+ amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
+ spin_lock(&die_kset->list_lock);
+ }
+ spin_unlock(&die_kset->list_lock);
+ kobject_put(&adev->ip_top->die_kset.kobj);
+ kobject_put(&adev->ip_top->kobj);
+}
+
+/* ================================================== */
+
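+/* Parse the IP discovery table: count SDMA/VCN/VPE/UMC/GC instances, record
+ * each IP's register base offsets in adev->reg_offset[] and its version in
+ * adev->ip_versions[].
+ */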
+static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
+{
+ uint8_t num_base_address, subrev, variant;
+ struct binary_header *bhdr;
+ struct ip_discovery_header *ihdr;
+ struct die_header *dhdr;
+ struct ip_v4 *ip;
+ uint16_t die_offset;
+ uint16_t ip_offset;
+ uint16_t num_dies;
+ uint32_t wafl_ver;
+ uint16_t num_ips;
+ uint16_t hw_id;
+ uint8_t inst;
+ int hw_ip;
+ int i, j, k;
+ int r;
+
+ r = amdgpu_discovery_init(adev);
+ if (r)
+ return r;
+
+ wafl_ver = 0;
+ adev->gfx.xcc_mask = 0;
+ adev->sdma.sdma_mask = 0;
+ adev->vcn.inst_mask = 0;
+ adev->jpeg.inst_mask = 0;
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ num_dies = le16_to_cpu(ihdr->num_dies);
+
+ DRM_DEBUG("number of dies: %d\n", num_dies);
+
+ for (i = 0; i < num_dies; i++) {
+ die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
+ dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
+ num_ips = le16_to_cpu(dhdr->num_ips);
+ ip_offset = die_offset + sizeof(*dhdr);
+
+ if (le16_to_cpu(dhdr->die_id) != i) {
+ DRM_ERROR("invalid die id %d, expected %d\n",
+ le16_to_cpu(dhdr->die_id), i);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG("number of hardware IPs on die%d: %d\n",
+ le16_to_cpu(dhdr->die_id), num_ips);
+
+ for (j = 0; j < num_ips; j++) {
+ ip = (struct ip_v4 *)(adev->mman.discovery_bin + ip_offset);
+
+ inst = ip->instance_number;
+ hw_id = le16_to_cpu(ip->hw_id);
+ if (amdgpu_discovery_validate_ip(adev, inst, hw_id))
+ goto next_ip;
+
+ num_base_address = ip->num_base_address;
+
+ DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
+ hw_id_names[le16_to_cpu(ip->hw_id)],
+ le16_to_cpu(ip->hw_id),
+ ip->instance_number,
+ ip->major, ip->minor,
+ ip->revision);
+
+ if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
+ /* Bits [5:0]: original revision value
+ * Bits [7:6]: en/decode capability:
+ * 0b00 : VCN functions normally
+ * 0b10 : encode is disabled
+ * 0b01 : decode is disabled
+ */
+ if (adev->vcn.num_vcn_inst <
+ AMDGPU_MAX_VCN_INSTANCES) {
+ adev->vcn.inst[adev->vcn.num_vcn_inst].vcn_config =
+ ip->revision & 0xc0;
+ adev->vcn.num_vcn_inst++;
+ adev->vcn.inst_mask |=
+ (1U << ip->instance_number);
+ adev->jpeg.inst_mask |=
+ (1U << ip->instance_number);
+ } else {
+ dev_err(adev->dev, "Too many VCN instances: %d vs %d\n",
+ adev->vcn.num_vcn_inst + 1,
+ AMDGPU_MAX_VCN_INSTANCES);
+ }
+ ip->revision &= ~0xc0;
+ }
+ if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA3_HWID) {
+ if (adev->sdma.num_instances <
+ AMDGPU_MAX_SDMA_INSTANCES) {
+ adev->sdma.num_instances++;
+ adev->sdma.sdma_mask |=
+ (1U << ip->instance_number);
+ } else {
+ dev_err(adev->dev, "Too many SDMA instances: %d vs %d\n",
+ adev->sdma.num_instances + 1,
+ AMDGPU_MAX_SDMA_INSTANCES);
+ }
+ }
+
+ if (le16_to_cpu(ip->hw_id) == VPE_HWID) {
+ if (adev->vpe.num_instances < AMDGPU_MAX_VPE_INSTANCES)
+ adev->vpe.num_instances++;
+ else
+ dev_err(adev->dev, "Too many VPE instances: %d vs %d\n",
+ adev->vpe.num_instances + 1,
+ AMDGPU_MAX_VPE_INSTANCES);
+ }
+
+ if (le16_to_cpu(ip->hw_id) == UMC_HWID) {
+ adev->gmc.num_umc++;
+ adev->umc.node_inst_num++;
+ }
+
+ if (le16_to_cpu(ip->hw_id) == GC_HWID)
+ adev->gfx.xcc_mask |=
+ (1U << ip->instance_number);
+
+ if (!wafl_ver && le16_to_cpu(ip->hw_id) == WAFLC_HWID)
+ wafl_ver = IP_VERSION_FULL(ip->major, ip->minor,
+ ip->revision, 0, 0);
+
+ for (k = 0; k < num_base_address; k++) {
+ /*
+ * convert the endianness of base addresses in place,
+ * so that we don't need to convert them when accessing adev->reg_offset.
+ */
+ if (ihdr->base_addr_64_bit)
+ /* Truncate the 64-bit base address from IP discovery
+ * and only store the lower 32-bit IP base in reg_offset[].
+ * Bits above 32 follow an ASIC-specific format, so just
+ * discard them here and handle them within the specific ASIC.
+ * This way reg_offset[] and related helpers can stay
+ * unchanged.
+ * The base address is in dwords, so clear the highest
+ * 2 bits before storing.
+ */
+ ip->base_address[k] =
+ lower_32_bits(le64_to_cpu(ip->base_address_64[k])) & 0x3FFFFFFF;
+ else
+ ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
+ DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
+ }
+
+ for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
+ if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id) &&
+ hw_id_map[hw_ip] != 0) {
+ DRM_DEBUG("set register base offset for %s\n",
+ hw_id_names[le16_to_cpu(ip->hw_id)]);
+ adev->reg_offset[hw_ip][ip->instance_number] =
+ ip->base_address;
+ /* Instance support is somewhat inconsistent.
+ * SDMA is a good example. Sienna cichlid has 4 total
+ * SDMA instances, each enumerated separately (HWIDs
+ * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
+ * but they are enumerated as multiple instances of the
+ * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
+ * example. On most chips there are multiple instances
+ * with the same HWID.
+ */
+
+ if (ihdr->version < 3) {
+ subrev = 0;
+ variant = 0;
+ } else {
+ subrev = ip->sub_revision;
+ variant = ip->variant;
+ }
+
+ adev->ip_versions[hw_ip]
+ [ip->instance_number] =
+ IP_VERSION_FULL(ip->major,
+ ip->minor,
+ ip->revision,
+ variant,
+ subrev);
+ }
+ }
+
+next_ip:
+ if (ihdr->base_addr_64_bit)
+ ip_offset += struct_size(ip, base_address_64, ip->num_base_address);
+ else
+ ip_offset += struct_size(ip, base_address, ip->num_base_address);
+ }
+ }
+
+ if (wafl_ver && !adev->ip_versions[XGMI_HWIP][0])
+ adev->ip_versions[XGMI_HWIP][0] = wafl_ver;
+
+ return 0;
+}
+
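+/* Apply harvesting information from either the per-IP harvest bits or the
+ * dedicated harvest table, then adjust the VCN/JPEG/UMC accounting.
+ */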
+static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
+{
+ struct ip_discovery_header *ihdr;
+ struct binary_header *bhdr;
+ int vcn_harvest_count = 0;
+ int umc_harvest_count = 0;
+ uint16_t offset, ihdr_ver;
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
+ ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
+ offset);
+ ihdr_ver = le16_to_cpu(ihdr->version);
+ /*
+ * The harvest table is not applicable to Navi1x and legacy GPUs,
+ * so read the harvest bit of each IP data structure to set the
+ * harvest configuration.
+ */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 2, 0) &&
+ ihdr_ver <= 2) {
+ if ((adev->pdev->device == 0x731E &&
+ (adev->pdev->revision == 0xC6 ||
+ adev->pdev->revision == 0xC7)) ||
+ (adev->pdev->device == 0x7340 &&
+ adev->pdev->revision == 0xC9) ||
+ (adev->pdev->device == 0x7360 &&
+ adev->pdev->revision == 0xC7))
+ amdgpu_discovery_read_harvest_bit_per_ip(adev,
+ &vcn_harvest_count);
+ } else {
+ amdgpu_discovery_read_from_harvest_table(adev,
+ &vcn_harvest_count,
+ &umc_harvest_count);
+ }
+
+ amdgpu_discovery_harvest_config_quirk(adev);
+
+ if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+ }
+
+ if (umc_harvest_count < adev->gmc.num_umc) {
+ adev->gmc.num_umc -= umc_harvest_count;
+ }
+}
+
+union gc_info {
+ struct gc_info_v1_0 v1;
+ struct gc_info_v1_1 v1_1;
+ struct gc_info_v1_2 v1_2;
+ struct gc_info_v1_3 v1_3;
+ struct gc_info_v2_0 v2;
+ struct gc_info_v2_1 v2_1;
+};
+
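+/* Fill adev->gfx.config and adev->gfx.cu_info from the GC info table. */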
+static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ union gc_info *gc_info;
+ u16 offset;
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+ return -EINVAL;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[GC].offset);
+
+ if (!offset)
+ return 0;
+
+ gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
+
+ switch (le16_to_cpu(gc_info->v1.header.version_major)) {
+ case 1:
+ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
+ adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
+ le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
+ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
+ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
+ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
+ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
+ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
+ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
+ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
+ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
+ le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
+ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
+ if (le16_to_cpu(gc_info->v1.header.version_minor) >= 1) {
+ adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
+ adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
+ adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
+ }
+ if (le16_to_cpu(gc_info->v1.header.version_minor) >= 2) {
+ adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
+ adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
+ adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
+ adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
+ adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
+ adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
+ adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
+ adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
+ }
+ if (le16_to_cpu(gc_info->v1.header.version_minor) >= 3) {
+ adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v1_3.gc_tcp_size_per_cu);
+ adev->gfx.config.gc_tcp_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcp_cache_line_size);
+ adev->gfx.config.gc_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_size_per_sqc);
+ adev->gfx.config.gc_instruction_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_instruction_cache_line_size);
+ adev->gfx.config.gc_scalar_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_size_per_sqc);
+ adev->gfx.config.gc_scalar_data_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_scalar_data_cache_line_size);
+ adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v1_3.gc_tcc_size);
+ adev->gfx.config.gc_tcc_cache_line_size = le32_to_cpu(gc_info->v1_3.gc_tcc_cache_line_size);
+ }
+ break;
+ case 2:
+ adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
+ adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
+ adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+ adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
+ adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
+ adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
+ adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
+ adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
+ adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
+ adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
+ adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
+ le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
+ adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
+ if (le16_to_cpu(gc_info->v2.header.version_minor) == 1) {
+ adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v2_1.gc_num_tcp_per_sh);
+ adev->gfx.config.gc_tcp_size_per_cu = le32_to_cpu(gc_info->v2_1.gc_tcp_size_per_cu);
+ adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v2_1.gc_num_sdp_interface); /* per XCD */
+ adev->gfx.config.gc_num_cu_per_sqc = le32_to_cpu(gc_info->v2_1.gc_num_cu_per_sqc);
+ adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_instruction_cache_size_per_sqc);
+ adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v2_1.gc_scalar_data_cache_size_per_sqc);
+ adev->gfx.config.gc_tcc_size = le32_to_cpu(gc_info->v2_1.gc_tcc_size); /* per XCD */
+ }
+ break;
+ default:
+ dev_err(adev->dev,
+ "Unhandled GC info table %d.%d\n",
+ le16_to_cpu(gc_info->v1.header.version_major),
+ le16_to_cpu(gc_info->v1.header.version_minor));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+union mall_info {
+ struct mall_info_v1_0 v1;
+ struct mall_info_v2_0 v2;
+};
+
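+/* Compute the total MALL size from the MALL info table. */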
+static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ union mall_info *mall_info;
+ u32 u, mall_size_per_umc, m_s_present, half_use;
+ u64 mall_size;
+ u16 offset;
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+ return -EINVAL;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
+
+ if (!offset)
+ return 0;
+
+ mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
+
+ switch (le16_to_cpu(mall_info->v1.header.version_major)) {
+ case 1:
+ mall_size = 0;
+ mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
+ m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
+ half_use = le32_to_cpu(mall_info->v1.m_half_use);
+ for (u = 0; u < adev->gmc.num_umc; u++) {
+ if (m_s_present & (1 << u))
+ mall_size += mall_size_per_umc * 2;
+ else if (half_use & (1 << u))
+ mall_size += mall_size_per_umc / 2;
+ else
+ mall_size += mall_size_per_umc;
+ }
+ adev->gmc.mall_size = mall_size;
+ adev->gmc.m_half_use = half_use;
+ break;
+ case 2:
+ mall_size_per_umc = le32_to_cpu(mall_info->v2.mall_size_per_umc);
+ adev->gmc.mall_size = (uint64_t)mall_size_per_umc * adev->gmc.num_umc;
+ break;
+ default:
+ dev_err(adev->dev,
+ "Unhandled MALL info table %d.%d\n",
+ le16_to_cpu(mall_info->v1.header.version_major),
+ le16_to_cpu(mall_info->v1.header.version_minor));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+union vcn_info {
+ struct vcn_info_v1_0 v1;
+};
+
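+/* Read the per-instance VCN codec-disable fuse data from the VCN info table. */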
+static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
+{
+ struct binary_header *bhdr;
+ union vcn_info *vcn_info;
+ u16 offset;
+ int v;
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+ return -EINVAL;
+ }
+
+ /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES,
+ * which is smaller than VCN_INFO_TABLE_MAX_NUM_INSTANCES, but that
+ * may change in the future with new GPUs, so keep this check for
+ * defensive purposes.
+ */
+ if (adev->vcn.num_vcn_inst > VCN_INFO_TABLE_MAX_NUM_INSTANCES) {
+ dev_err(adev->dev, "invalid vcn instances\n");
+ return -EINVAL;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
+
+ if (!offset)
+ return 0;
+
+ vcn_info = (union vcn_info *)(adev->mman.discovery_bin + offset);
+
+ switch (le16_to_cpu(vcn_info->v1.header.version_major)) {
+ case 1:
+ /* num_vcn_inst is currently limited to AMDGPU_MAX_VCN_INSTANCES
+ * so this won't overflow.
+ */
+ for (v = 0; v < adev->vcn.num_vcn_inst; v++) {
+ adev->vcn.inst[v].vcn_codec_disable_mask =
+ le32_to_cpu(vcn_info->v1.instance_info[v].fuse_data.all_bits);
+ }
+ break;
+ default:
+ dev_err(adev->dev,
+ "Unhandled VCN info table %d.%d\n",
+ le16_to_cpu(vcn_info->v1.header.version_major),
+ le16_to_cpu(vcn_info->v1.header.version_minor));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+union nps_info {
+ struct nps_info_v1_0 v1;
+};
+
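+/* Re-read the NPS info table directly from VRAM and verify its checksum. */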
+static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
+ union nps_info *nps_data)
+{
+ uint64_t vram_size, pos, offset;
+ struct nps_info_header *nhdr;
+ struct binary_header bhdr;
+ uint16_t checksum;
+
+ vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+ pos = vram_size - DISCOVERY_TMR_OFFSET;
+ amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);
+
+ offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
+ checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);
+
+ amdgpu_device_vram_access(adev, (pos + offset), nps_data,
+ sizeof(*nps_data), false);
+
+ nhdr = (struct nps_info_header *)(nps_data);
+ if (!amdgpu_discovery_verify_checksum((uint8_t *)nps_data,
+ le32_to_cpu(nhdr->size_bytes),
+ checksum)) {
+ dev_err(adev->dev, "nps data refresh, checksum mismatch\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
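+/* Return the NPS type and memory ranges described by the NPS info table;
+ * with @refresh, re-read the table from VRAM first. The returned ranges
+ * array is allocated with kvcalloc() and owned by the caller.
+ */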
+int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
+ uint32_t *nps_type,
+ struct amdgpu_gmc_memrange **ranges,
+ int *range_cnt, bool refresh)
+{
+ struct amdgpu_gmc_memrange *mem_ranges;
+ struct binary_header *bhdr;
+ union nps_info *nps_info;
+ union nps_info nps_data;
+ u16 offset;
+ int i, r;
+
+ if (!nps_type || !range_cnt || !ranges)
+ return -EINVAL;
+
+ if (refresh) {
+ r = amdgpu_discovery_refresh_nps_info(adev, &nps_data);
+ if (r)
+ return r;
+ nps_info = &nps_data;
+ } else {
+ if (!adev->mman.discovery_bin) {
+ dev_err(adev->dev,
+ "fetch mem range failed, ip discovery uninitialized\n");
+ return -EINVAL;
+ }
+
+ bhdr = (struct binary_header *)adev->mman.discovery_bin;
+ offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
+
+ if (!offset)
+ return -ENOENT;
+
+ /* If verification fails, return as if NPS table doesn't exist */
+ if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
+ return -ENOENT;
+
+ nps_info =
+ (union nps_info *)(adev->mman.discovery_bin + offset);
+ }
+
+ switch (le16_to_cpu(nps_info->v1.header.version_major)) {
+ case 1:
+ mem_ranges = kvcalloc(nps_info->v1.count,
+ sizeof(*mem_ranges),
+ GFP_KERNEL);
+ if (!mem_ranges)
+ return -ENOMEM;
+ *nps_type = nps_info->v1.nps_type;
+ *range_cnt = nps_info->v1.count;
+ for (i = 0; i < *range_cnt; i++) {
+ mem_ranges[i].base_address =
+ nps_info->v1.instance_info[i].base_address;
+ mem_ranges[i].limit_address =
+ nps_info->v1.instance_info[i].limit_address;
+ mem_ranges[i].nid_mask = -1;
+ mem_ranges[i].flags = 0;
+ }
+ *ranges = mem_ranges;
+ break;
+ default:
+ dev_err(adev->dev, "Unhandled NPS info table %d.%d\n",
+ le16_to_cpu(nps_info->v1.header.version_major),
+ le16_to_cpu(nps_info->v1.header.version_minor));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
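+/* The amdgpu_discovery_set_*_ip_blocks() helpers below map the detected IP
+ * versions to the matching IP block implementations and register them with
+ * the device.
+ */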
+static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
+{
+ /* There is no dedicated HWIP for the common block; key off the GC IP version. */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
+ amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
+ amdgpu_device_ip_block_add(adev, &soc21_common_ip_block);
+ break;
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &soc24_common_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add common ip block(GC_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, GC_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
+{
+ /* use GC or MMHUB IP version */
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
+ amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
+ amdgpu_device_ip_block_add(adev, &gmc_v11_0_ip_block);
+ break;
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &gmc_v12_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev, "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, GC_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, OSSSYS_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 3, 0):
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ break;
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 1):
+ case IP_VERSION(4, 4, 0):
+ case IP_VERSION(4, 4, 2):
+ case IP_VERSION(4, 4, 5):
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 3):
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 1):
+ amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ break;
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 0, 2):
+ amdgpu_device_ip_block_add(adev, &ih_v6_0_ip_block);
+ break;
+ case IP_VERSION(6, 1, 0):
+ amdgpu_device_ip_block_add(adev, &ih_v6_1_ip_block);
+ break;
+ case IP_VERSION(7, 0, 0):
+ amdgpu_device_ip_block_add(adev, &ih_v7_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, OSSSYS_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+ break;
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 8):
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
+ break;
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 11):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ case IP_VERSION(14, 0, 4):
+ amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 4):
+ amdgpu_device_ip_block_add(adev, &psp_v13_0_4_ip_block);
+ break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ case IP_VERSION(14, 0, 5):
+ amdgpu_device_ip_block_add(adev, &psp_v14_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add psp ip block(MP0_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, MP0_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ if (adev->asic_type == CHIP_ARCTURUS)
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 8):
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ break;
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 4):
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 11):
+ case IP_VERSION(13, 0, 14):
+ case IP_VERSION(13, 0, 12):
+ amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
+ break;
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ case IP_VERSION(14, 0, 4):
+ case IP_VERSION(14, 0, 5):
+ amdgpu_device_ip_block_add(adev, &smu_v14_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add smu ip block(MP1_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, MP1_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#if defined(CONFIG_DRM_AMD_DC)
+static void amdgpu_discovery_set_sriov_display(struct amdgpu_device *adev)
+{
+ amdgpu_device_set_sriov_virtual_display(adev);
+ amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+}
+#endif
+
+static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
+{
+ if (adev->enable_virtual_display) {
+ amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+ return 0;
+ }
+
+ if (!amdgpu_device_has_dc_support(adev))
+ return 0;
+
+#if defined(CONFIG_DRM_AMD_DC)
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 6, 0):
+ case IP_VERSION(4, 1, 0):
+ /* TODO: Fix IP version. DC code expects version 4.0.1 */
+ if (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(4, 1, 0))
+ adev->ip_versions[DCE_HWIP][0] = IP_VERSION(4, 0, 1);
+
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_discovery_set_sriov_display(adev);
+ else
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add dm ip block(DCE_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ return -EINVAL;
+ }
+ } else if (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
+ switch (amdgpu_ip_version(adev, DCI_HWIP, 0)) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ case IP_VERSION(12, 1, 0):
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_discovery_set_sriov_display(adev);
+ else
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add dm ip block(DCI_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, DCI_HWIP, 0));
+ return -EINVAL;
+ }
+ }
+#endif
+ return 0;
+}
+
+static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ break;
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
+ amdgpu_device_ip_block_add(adev, &gfx_v9_4_3_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 7):
+ amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
+ amdgpu_device_ip_block_add(adev, &gfx_v11_0_ip_block);
+ break;
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &gfx_v12_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev, "Failed to add gfx ip block(GC_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, GC_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 2):
+ case IP_VERSION(4, 4, 0):
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ break;
+ case IP_VERSION(4, 4, 2):
+ case IP_VERSION(4, 4, 5):
+ case IP_VERSION(4, 4, 4):
+ amdgpu_device_ip_block_add(adev, &sdma_v4_4_2_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 5):
+ amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
+ break;
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 2):
+ case IP_VERSION(5, 2, 4):
+ case IP_VERSION(5, 2, 5):
+ case IP_VERSION(5, 2, 6):
+ case IP_VERSION(5, 2, 3):
+ case IP_VERSION(5, 2, 1):
+ case IP_VERSION(5, 2, 7):
+ amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
+ break;
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 1, 1):
+ case IP_VERSION(6, 1, 2):
+ case IP_VERSION(6, 1, 3):
+ amdgpu_device_ip_block_add(adev, &sdma_v6_0_ip_block);
+ break;
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ amdgpu_device_ip_block_add(adev, &sdma_v7_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0));
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
+{
+ if (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
+ switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 2, 0):
+ /* UVD is not supported on vega20 SR-IOV */
+ if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
+ amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, UVD_HWIP, 0));
+ return -EINVAL;
+ }
+ switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 1, 0):
+ /* VCE is not supported on vega20 SR-IOV */
+ if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
+ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, VCE_HWIP, 0));
+ return -EINVAL;
+ }
+ } else {
+ switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
+ break;
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 2, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ break;
+ case IP_VERSION(2, 0, 3):
+ break;
+ case IP_VERSION(2, 5, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
+ break;
+ case IP_VERSION(2, 6, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
+ break;
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 16):
+ case IP_VERSION(3, 1, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 0, 2):
+ amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
+ break;
+ case IP_VERSION(3, 0, 33):
+ amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
+ break;
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 2):
+ case IP_VERSION(4, 0, 4):
+ amdgpu_device_ip_block_add(adev, &vcn_v4_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_ip_block);
+ break;
+ case IP_VERSION(4, 0, 3):
+ amdgpu_device_ip_block_add(adev, &vcn_v4_0_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_3_ip_block);
+ break;
+ case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
+ amdgpu_device_ip_block_add(adev, &vcn_v4_0_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v4_0_5_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v5_0_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v5_0_0_ip_block);
+ break;
+ case IP_VERSION(5, 0, 1):
+ amdgpu_device_ip_block_add(adev, &vcn_v5_0_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v5_0_1_ip_block);
+ break;
+ default:
+ dev_err(adev->dev,
+ "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
+ amdgpu_ip_version(adev, UVD_HWIP, 0));
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
+ amdgpu_device_ip_block_add(adev, &mes_v11_0_ip_block);
+ adev->enable_mes = true;
+ adev->enable_mes_kiq = true;
+ break;
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &mes_v12_0_ip_block);
+ adev->enable_mes = true;
+ adev->enable_mes_kiq = true;
+ if (amdgpu_uni_mes)
+ adev->enable_uni_mes = true;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void amdgpu_discovery_init_soc_config(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
+ aqua_vanjaram_init_soc_config(adev);
+ break;
+ default:
+ break;
+ }
+}
+
+static int amdgpu_discovery_set_vpe_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 1, 1):
+ case IP_VERSION(6, 1, 3):
+ amdgpu_device_ip_block_add(adev, &vpe_v6_1_ip_block);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_set_umsch_mm_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
+ case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
+ if (amdgpu_umsch_mm & 0x1) {
+ amdgpu_device_ip_block_add(adev, &umsch_mm_v4_0_ip_block);
+ adev->enable_umsch_mm = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_set_isp_ip_blocks(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DRM_AMD_ISP)
+ switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
+ case IP_VERSION(4, 1, 0):
+ amdgpu_device_ip_block_add(adev, &isp_v4_1_0_ip_block);
+ break;
+ case IP_VERSION(4, 1, 1):
+ amdgpu_device_ip_block_add(adev, &isp_v4_1_1_ip_block);
+ break;
+ default:
+ break;
+ }
+#endif
+
+ return 0;
+}
+
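+/* Top-level entry point: determine the IP versions, either hardcoded for
+ * pre-discovery ASICs (e.g. Vega10/12/20, Raven, Arcturus) or parsed from
+ * the discovery table, and add the corresponding IP blocks.
+ */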
+int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+{
+ int r;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->gmc.num_umc = 4;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
+ break;
+ case CHIP_VEGA12:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->gmc.num_umc = 4;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
+ break;
+ case CHIP_RAVEN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 1;
+ adev->vcn.num_vcn_inst = 1;
+ adev->gmc.num_umc = 2;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
+ adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
+ adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
+ } else {
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
+ adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
+ adev->ip_versions[ISP_HWIP][0] = IP_VERSION(2, 0, 0);
+ }
+ break;
+ case CHIP_VEGA20:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
+ vega20_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->gmc.num_umc = 8;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
+ break;
+ case CHIP_ARCTURUS:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
+ arct_reg_base_init(adev);
+ adev->sdma.num_instances = 8;
+ adev->vcn.num_vcn_inst = 2;
+ adev->gmc.num_umc = 8;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
+ break;
+ case CHIP_ALDEBARAN:
+ /* This is not fatal. We only need the discovery
+ * binary for sysfs. We don't need it for a
+ * functional system.
+ */
+ amdgpu_discovery_init(adev);
+ aldebaran_reg_base_init(adev);
+ adev->sdma.num_instances = 5;
+ adev->vcn.num_vcn_inst = 2;
+ adev->gmc.num_umc = 4;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
+ adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
+ break;
+ case CHIP_CYAN_SKILLFISH:
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r)
+ return -EINVAL;
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
+ } else {
+ cyan_skillfish_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(2, 0, 3);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(5, 0, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 5, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(2, 1, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(8, 1, 1);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 1);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 8);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(10, 1, 3);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 0, 3);
+ }
+ break;
+ default:
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r) {
+ drm_err(&adev->ddev, "discovery failed: %d\n", r);
+ return r;
+ }
+
+ amdgpu_discovery_harvest_ip(adev);
+ amdgpu_discovery_get_gfx_info(adev);
+ amdgpu_discovery_get_mall_info(adev);
+ amdgpu_discovery_get_vcn_info(adev);
+ break;
+ }
+
+ amdgpu_discovery_init_soc_config(adev);
+ amdgpu_discovery_sysfs_init(adev);
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
+ adev->family = AMDGPU_FAMILY_AI;
+ break;
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ adev->family = AMDGPU_FAMILY_RV;
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ adev->family = AMDGPU_FAMILY_NV;
+ break;
+ case IP_VERSION(10, 3, 1):
+ adev->family = AMDGPU_FAMILY_VGH;
+ adev->apu_flags |= AMD_APU_IS_VANGOGH;
+ break;
+ case IP_VERSION(10, 3, 3):
+ adev->family = AMDGPU_FAMILY_YC;
+ break;
+ case IP_VERSION(10, 3, 6):
+ adev->family = AMDGPU_FAMILY_GC_10_3_6;
+ break;
+ case IP_VERSION(10, 3, 7):
+ adev->family = AMDGPU_FAMILY_GC_10_3_7;
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ adev->family = AMDGPU_FAMILY_GC_11_0_0;
+ break;
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ adev->family = AMDGPU_FAMILY_GC_11_0_1;
+ break;
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
+ adev->family = AMDGPU_FAMILY_GC_11_5_0;
+ break;
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ adev->family = AMDGPU_FAMILY_GC_12_0_0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 6):
+ case IP_VERSION(10, 3, 7):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
+ adev->flags |= AMD_IS_APU;
+ break;
+ default:
+ break;
+ }
+
+ /* set NBIO version */
+ switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 2, 0):
+ adev->nbio.funcs = &nbio_v6_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ case IP_VERSION(2, 5, 0):
+ adev->nbio.funcs = &nbio_v7_0_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 4, 0):
+ case IP_VERSION(7, 4, 1):
+ case IP_VERSION(7, 4, 4):
+ adev->nbio.funcs = &nbio_v7_4_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 9, 0):
+ case IP_VERSION(7, 9, 1):
+ adev->nbio.funcs = &nbio_v7_9_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_9_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 11, 0):
+ case IP_VERSION(7, 11, 1):
+ case IP_VERSION(7, 11, 2):
+ case IP_VERSION(7, 11, 3):
+ adev->nbio.funcs = &nbio_v7_11_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_11_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 2, 0):
+ case IP_VERSION(7, 2, 1):
+ case IP_VERSION(7, 3, 0):
+ case IP_VERSION(7, 5, 0):
+ case IP_VERSION(7, 5, 1):
+ adev->nbio.funcs = &nbio_v7_2_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
+ break;
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 3, 0):
+ case IP_VERSION(2, 3, 1):
+ case IP_VERSION(2, 3, 2):
+ case IP_VERSION(3, 3, 0):
+ case IP_VERSION(3, 3, 1):
+ case IP_VERSION(3, 3, 2):
+ case IP_VERSION(3, 3, 3):
+ adev->nbio.funcs = &nbio_v2_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
+ break;
+ case IP_VERSION(4, 3, 0):
+ case IP_VERSION(4, 3, 1):
+ if (amdgpu_sriov_vf(adev))
+ adev->nbio.funcs = &nbio_v4_3_sriov_funcs;
+ else
+ adev->nbio.funcs = &nbio_v4_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v4_3_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 7, 0):
+ case IP_VERSION(7, 7, 1):
+ adev->nbio.funcs = &nbio_v7_7_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_7_hdp_flush_reg;
+ break;
+ case IP_VERSION(6, 3, 1):
+ adev->nbio.funcs = &nbif_v6_3_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbif_v6_3_1_hdp_flush_reg;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, HDP_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 1):
+ case IP_VERSION(4, 4, 0):
+ case IP_VERSION(4, 4, 2):
+ case IP_VERSION(4, 4, 5):
+ adev->hdp.funcs = &hdp_v4_0_funcs;
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 3):
+ case IP_VERSION(5, 0, 4):
+ case IP_VERSION(5, 2, 0):
+ adev->hdp.funcs = &hdp_v5_0_funcs;
+ break;
+ case IP_VERSION(5, 2, 1):
+ adev->hdp.funcs = &hdp_v5_2_funcs;
+ break;
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 1, 0):
+ adev->hdp.funcs = &hdp_v6_0_funcs;
+ break;
+ case IP_VERSION(7, 0, 0):
+ adev->hdp.funcs = &hdp_v7_0_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, DF_HWIP, 0)) {
+ case IP_VERSION(3, 6, 0):
+ case IP_VERSION(3, 6, 1):
+ case IP_VERSION(3, 6, 2):
+ adev->df.funcs = &df_v3_6_funcs;
+ break;
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 5, 2):
+ adev->df.funcs = &df_v1_7_funcs;
+ break;
+ case IP_VERSION(4, 3, 0):
+ adev->df.funcs = &df_v4_3_funcs;
+ break;
+ case IP_VERSION(4, 6, 2):
+ adev->df.funcs = &df_v4_6_2_funcs;
+ break;
+ case IP_VERSION(4, 15, 0):
+ case IP_VERSION(4, 15, 1):
+ adev->df.funcs = &df_v4_15_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, SMUIO_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(10, 0, 2):
+ adev->smuio.funcs = &smuio_v9_0_funcs;
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 8):
+ adev->smuio.funcs = &smuio_v11_0_funcs;
+ break;
+ case IP_VERSION(11, 0, 6):
+ case IP_VERSION(11, 0, 10):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 9):
+ case IP_VERSION(13, 0, 10):
+ adev->smuio.funcs = &smuio_v11_0_6_funcs;
+ break;
+ case IP_VERSION(13, 0, 2):
+ adev->smuio.funcs = &smuio_v13_0_funcs;
+ break;
+ case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 11):
+ adev->smuio.funcs = &smuio_v13_0_3_funcs;
+ if (adev->smuio.funcs->get_pkg_type(adev) == AMDGPU_PKG_TYPE_APU) {
+ adev->flags |= AMD_IS_APU;
+ }
+ break;
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 8):
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ adev->smuio.funcs = &smuio_v13_0_6_funcs;
+ break;
+ case IP_VERSION(14, 0, 2):
+ adev->smuio.funcs = &smuio_v14_0_2_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, LSDMA_HWIP, 0)) {
+ case IP_VERSION(6, 0, 0):
+ case IP_VERSION(6, 0, 1):
+ case IP_VERSION(6, 0, 2):
+ case IP_VERSION(6, 0, 3):
+ adev->lsdma.funcs = &lsdma_v6_0_funcs;
+ break;
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ adev->lsdma.funcs = &lsdma_v7_0_funcs;
+ break;
+ default:
+ break;
+ }
+
+ r = amdgpu_discovery_set_common_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_gmc_ip_blocks(adev);
+ if (r)
+ return r;
+
+ /* For SR-IOV, PSP needs to be initialized before IH */
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_discovery_set_psp_ip_blocks(adev);
+ if (r)
+ return r;
+ r = amdgpu_discovery_set_ih_ip_blocks(adev);
+ if (r)
+ return r;
+ } else {
+ r = amdgpu_discovery_set_ih_ip_blocks(adev);
+ if (r)
+ return r;
+
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ r = amdgpu_discovery_set_psp_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+ }
+
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ r = amdgpu_discovery_set_smu_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_discovery_set_display_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_gc_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_sdma_ip_blocks(adev);
+ if (r)
+ return r;
+
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
+ !amdgpu_sriov_vf(adev)) ||
+ (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO && amdgpu_dpm == 1)) {
+ r = amdgpu_discovery_set_smu_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_discovery_set_mm_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_mes_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_vpe_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_umsch_mm_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_isp_ip_blocks(adev);
+ if (r)
+ return r;
+ return 0;
+}
+
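All of the switch/case and ordering checks in this function work because IP_VERSION() packs major/minor/revision into one integer that compares in version order. The sketch below is illustrative only: it assumes a (major << 16) | (minor << 8) | rev style packing, and DEMO_IP_VERSION is a hypothetical stand-in, not the driver's macro (whose exact field widths have changed across kernel releases).

/* Illustrative packing: numeric comparisons then order versions correctly. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_IP_VERSION(maj, min, rev) \
	(((uint32_t)(maj) << 16) | ((uint32_t)(min) << 8) | (uint32_t)(rev))

int main(void)
{
	uint32_t gc = DEMO_IP_VERSION(10, 3, 1);	/* GC 10.3.1 maps to AMDGPU_FAMILY_VGH above */

	if (gc >= DEMO_IP_VERSION(10, 3, 0))
		printf("takes the GFX10.3+ paths\n");
	if (gc < DEMO_IP_VERSION(11, 0, 0))
		printf("skips the GFX11 paths\n");
	return 0;
}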
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
new file mode 100644
index 000000000000..b44d56465c5b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DISCOVERY__
+#define __AMDGPU_DISCOVERY__
+
+#define DISCOVERY_TMR_SIZE (10 << 10)
+#define DISCOVERY_TMR_OFFSET (64 << 10)
+
+void amdgpu_discovery_fini(struct amdgpu_device *adev);
+int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
+
+int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
+ uint32_t *nps_type,
+ struct amdgpu_gmc_memrange **ranges,
+ int *range_cnt, bool refresh);
+
+#endif /* __AMDGPU_DISCOVERY__ */
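The two TMR constants above are plain shifted sizes: (10 << 10) is 10 KiB and (64 << 10) is 64 KiB. Reading the offset as measured back from the end of VRAM, where the discovery binary lives, is my interpretation rather than something this hunk states. A quick stand-alone check of the arithmetic:

#include <assert.h>

int main(void)
{
	/* Shifting left by 10 multiplies by 1024, i.e. expresses KiB. */
	assert((10 << 10) == 10 * 1024);	/* DISCOVERY_TMR_SIZE: 10 KiB */
	assert((64 << 10) == 64 * 1024);	/* DISCOVERY_TMR_OFFSET: 64 KiB */
	return 0;
}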
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index cdf2ab20166a..51bab32fd8c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -23,19 +23,73 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
+#include "amdgpu_display.h"
+#include "soc15_common.h"
+#include "gc/gc_11_0_0_offset.h"
+#include "gc/gc_11_0_0_sh_mask.h"
+#include "bif/bif_4_1_d.h"
#include <asm/div64.h>
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_vblank.h>
-static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
+/**
+ * amdgpu_display_hotplug_work_func - work handler for display hotplug event
+ *
+ * @work: work struct pointer
+ *
+ * This is the hotplug event work handler (all ASICs).
+ * The work gets scheduled from the IRQ handler if there
+ * was a hotplug interrupt. It walks through the connector table
+ * and calls the hotplug handler for each connector. After this, it sends
+ * a DRM hotplug event to alert userspace.
+ *
+ * This design approach is required in order to defer hotplug event handling
+ * from the IRQ handler to a work handler because the hotplug handler has to
+ * use mutexes, which cannot be locked in an IRQ handler (since &mutex_lock
+ * may sleep).
+ */
+void amdgpu_display_hotplug_work_func(struct work_struct *work)
+{
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+ hotplug_work.work);
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+
+ mutex_lock(&mode_config->mutex);
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ amdgpu_connector_hotplug(connector);
+ drm_connector_list_iter_end(&iter);
+ mutex_unlock(&mode_config->mutex);
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(dev);
+}
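A minimal userspace analogue of that deferral, purely illustrative (pthreads instead of the kernel workqueue; fake_hpd_irq and hotplug_worker are made-up names): the "interrupt" path only flags the event, and the lock is taken only from the worker running in normal context.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t mode_config_mutex = PTHREAD_MUTEX_INITIALIZER;
static atomic_int hotplug_pending;

static void fake_hpd_irq(void)
{
	/* "IRQ" context: no mutexes here, just flag that work is pending. */
	atomic_store(&hotplug_pending, 1);
}

static void *hotplug_worker(void *arg)
{
	(void)arg;
	while (!atomic_load(&hotplug_pending))
		;	/* a real worker sleeps; busy-waiting keeps the demo short */

	/* Normal context: safe to take a lock that may block. */
	pthread_mutex_lock(&mode_config_mutex);
	puts("walk the connector list, then send a hotplug uevent");
	pthread_mutex_unlock(&mode_config_mutex);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, hotplug_worker, NULL);
	fake_hpd_irq();
	pthread_join(worker, NULL);
	return 0;	/* compile with: cc -pthread demo.c */
}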
+
+static int amdgpu_display_framebuffer_init(struct drm_device *dev,
+ struct amdgpu_framebuffer *rfb,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+static void amdgpu_display_flip_callback(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
struct amdgpu_flip_work *work =
container_of(cb, struct amdgpu_flip_work, cb);
@@ -44,24 +98,25 @@ static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
schedule_work(&work->flip_work.work);
}
-static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
- struct dma_fence **f)
+static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
+ struct dma_fence **f)
{
- struct dma_fence *fence= *f;
+ struct dma_fence *fence = *f;
if (fence == NULL)
return false;
*f = NULL;
- if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
+ if (!dma_fence_add_callback(fence, &work->cb,
+ amdgpu_display_flip_callback))
return true;
dma_fence_put(fence);
return false;
}
-static void amdgpu_flip_work_func(struct work_struct *__work)
+static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
struct delayed_work *delayed_work =
container_of(__work, struct delayed_work, work);
@@ -72,27 +127,24 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
struct drm_crtc *crtc = &amdgpu_crtc->base;
unsigned long flags;
- unsigned i;
+ unsigned int i;
int vpos, hpos;
- if (amdgpu_flip_handle_fence(work, &work->excl))
- return;
-
for (i = 0; i < work->shared_count; ++i)
- if (amdgpu_flip_handle_fence(work, &work->shared[i]))
+ if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
return;
/* Wait until we're out of the vertical blank period before the one
* targeted by the flip
*/
if (amdgpu_crtc->enabled &&
- (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
- &vpos, &hpos, NULL, NULL,
- &crtc->hwmode)
+ (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
+ &vpos, &hpos, NULL, NULL,
+ &crtc->hwmode)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(work->target_vblank -
- amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
+ amdgpu_get_vblank_counter_kms(crtc)) > 0) {
schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
return;
}
@@ -108,15 +160,16 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
- amdgpu_crtc->crtc_id, amdgpu_crtc, work);
+ drm_dbg_vbl(adev_to_drm(adev),
+ "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
+ amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}
/*
* Handle unpin events outside the interrupt handler proper.
*/
-static void amdgpu_unpin_work_func(struct work_struct *__work)
+static void amdgpu_display_unpin_work_func(struct work_struct *__work)
{
struct amdgpu_flip_work *work =
container_of(__work, struct amdgpu_flip_work, unpin_work);
@@ -125,10 +178,7 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
/* unpin of the old buffer */
r = amdgpu_bo_reserve(work->old_abo, true);
if (likely(r == 0)) {
- r = amdgpu_bo_unpin(work->old_abo);
- if (unlikely(r != 0)) {
- DRM_ERROR("failed to unpin buffer after flip\n");
- }
+ amdgpu_bo_unpin(work->old_abo);
amdgpu_bo_unreserve(work->old_abo);
} else
DRM_ERROR("failed to reserve buffer after flip\n");
@@ -138,31 +188,28 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
kfree(work);
}
-int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags, uint32_t target,
- struct drm_modeset_acquire_ctx *ctx)
+int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags, uint32_t target,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_framebuffer *old_amdgpu_fb;
- struct amdgpu_framebuffer *new_amdgpu_fb;
struct drm_gem_object *obj;
struct amdgpu_flip_work *work;
struct amdgpu_bo *new_abo;
unsigned long flags;
u64 tiling_flags;
- u64 base;
int i, r;
- work = kzalloc(sizeof *work, GFP_KERNEL);
+ work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL)
return -ENOMEM;
- INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
- INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
+ INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
+ INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);
work->event = event;
work->adev = adev;
@@ -170,15 +217,13 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
/* schedule unpin of the old buffer */
- old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- obj = old_amdgpu_fb->obj;
+ obj = crtc->primary->fb->obj[0];
/* take a reference to the old object */
work->old_abo = gem_to_amdgpu_bo(obj);
amdgpu_bo_ref(work->old_abo);
- new_amdgpu_fb = to_amdgpu_framebuffer(fb);
- obj = new_amdgpu_fb->obj;
+ obj = fb->obj[0];
new_abo = gem_to_amdgpu_bo(obj);
/* pin the new buffer */
@@ -188,15 +233,25 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
goto cleanup;
}
- r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base);
+ if (!adev->enable_virtual_display) {
+ new_abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ r = amdgpu_bo_pin(new_abo,
+ amdgpu_display_supported_domains(adev, new_abo->flags));
+ if (unlikely(r != 0)) {
+ DRM_ERROR("failed to pin new abo buffer before flip\n");
+ goto unreserve;
+ }
+ }
+
+ r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
if (unlikely(r != 0)) {
- DRM_ERROR("failed to pin new abo buffer before flip\n");
- goto unreserve;
+ DRM_ERROR("%p bind failed\n", new_abo);
+ goto unpin;
}
- r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
- &work->shared_count,
- &work->shared);
+ r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
+ &work->shared_count,
+ &work->shared);
if (unlikely(r != 0)) {
DRM_ERROR("failed to get fences for buffer\n");
goto unpin;
@@ -205,9 +260,10 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
amdgpu_bo_unreserve(new_abo);
- work->base = base;
- work->target_vblank = target - drm_crtc_vblank_count(crtc) +
- amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
+ if (!adev->enable_virtual_display)
+ work->base = amdgpu_bo_gpu_offset(new_abo);
+ work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
+ amdgpu_get_vblank_counter_kms(crtc);
 /* we borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -227,7 +283,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
/* update crtc fb */
crtc->primary->fb = fb;
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- amdgpu_flip_work_func(&work->flip_work.work);
+ amdgpu_display_flip_work_func(&work->flip_work.work);
return 0;
pflip_cleanup:
@@ -236,15 +292,14 @@ pflip_cleanup:
goto cleanup;
}
unpin:
- if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
- DRM_ERROR("failed to unpin new abo in error path\n");
- }
+ if (!adev->enable_virtual_display)
+ amdgpu_bo_unpin(new_abo);
+
unreserve:
amdgpu_bo_unreserve(new_abo);
cleanup:
amdgpu_bo_unref(&work->old_abo);
- dma_fence_put(work->excl);
for (i = 0; i < work->shared_count; ++i)
dma_fence_put(work->shared[i]);
kfree(work->shared);
@@ -253,8 +308,8 @@ cleanup:
return r;
}
-int amdgpu_crtc_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx)
+int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
+ struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev;
struct amdgpu_device *adev;
@@ -269,7 +324,7 @@ int amdgpu_crtc_set_config(struct drm_mode_set *set,
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0)
- return ret;
+ goto out;
ret = drm_crtc_helper_set_config(set, ctx);
@@ -279,20 +334,20 @@ int amdgpu_crtc_set_config(struct drm_mode_set *set,
pm_runtime_mark_last_busy(dev->dev);
- adev = dev->dev_private;
+ adev = drm_to_adev(dev);
/* if we have active crtcs and we don't have a power ref,
- take the current one */
+ * take the current one
+ */
if (active && !adev->have_disp_power_ref) {
adev->have_disp_power_ref = true;
return ret;
}
- /* if we have no active crtcs, then drop the power ref
- we got before */
- if (!active && adev->have_disp_power_ref) {
- pm_runtime_put_autosuspend(dev->dev);
+ /* if we have no active crtcs, then fall through and
+ * drop the power ref we got before
+ */
+ if (!active && adev->have_disp_power_ref)
adev->have_disp_power_ref = false;
- }
-
+out:
/* drop the power reference we got coming in here */
pm_runtime_put_autosuspend(dev->dev);
return ret;
@@ -351,17 +406,19 @@ static const char *hpd_names[6] = {
"HPD6",
};
-void amdgpu_print_display_setup(struct drm_device *dev)
+void amdgpu_display_print_display_setup(struct drm_device *dev)
{
struct drm_connector *connector;
struct amdgpu_connector *amdgpu_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
+ struct drm_connector_list_iter iter;
uint32_t devices;
int i = 0;
+ drm_connector_list_iter_begin(dev, &iter);
DRM_INFO("AMDGPU Display Connectors\n");
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
DRM_INFO("Connector %d:\n", i);
DRM_INFO(" %s\n", connector->name);
@@ -425,14 +482,11 @@ void amdgpu_print_display_setup(struct drm_device *dev)
}
i++;
}
+ drm_connector_list_iter_end(&iter);
}
-/**
- * amdgpu_ddc_probe
- *
- */
-bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
- bool use_aux)
+bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
+ bool use_aux)
{
u8 out = 0x0;
u8 buf[8];
@@ -456,11 +510,10 @@ bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
if (amdgpu_connector->router.ddc_valid)
amdgpu_i2c_router_select_ddc_port(amdgpu_connector);
- if (use_aux) {
+ if (use_aux)
ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
- } else {
+ else
ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
- }
if (ret != 2)
/* Couldn't find an accessible DDC on this connector */
@@ -469,172 +522,899 @@ bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
* EDID header starts with:
* 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
* Only the first 6 bytes must be valid as
- * drm_edid_block_valid() can fix the last 2 bytes */
+ * drm_edid_block_valid() can fix the last 2 bytes
+ */
if (drm_edid_header_is_valid(buf) < 6) {
/* Couldn't find an accessible EDID on this
- * connector */
+ * connector
+ */
return false;
}
return true;
}
-static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
+static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips, unsigned int num_clips)
{
- struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
- drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
- drm_framebuffer_cleanup(fb);
- kfree(amdgpu_fb);
+ if (file)
+ return -ENOSYS;
+
+ return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
+ num_clips);
}
-static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
- unsigned int *handle)
-{
- struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
+static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+};
- return drm_gem_handle_create(file_priv, amdgpu_fb->obj, handle);
+static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+ .dirty = amdgpu_dirtyfb
+};
+
+uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
+ uint64_t bo_flags)
+{
+ uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+#if defined(CONFIG_DRM_AMD_DC)
+ /*
+ * If amdgpu_bo_support_uswc() returns false, USWC mappings are not
+ * supported on this board. But such a mapping is required to avoid a
+ * hang caused by placing a scanout BO in GTT on certain APUs, so force
+ * the BO placement to VRAM in case the architecture will not allow
+ * USWC mappings.
+ * Also, don't allow the GTT domain if the BO doesn't have the USWC flag set.
+ */
+ if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
+ amdgpu_bo_support_uswc(bo_flags) &&
+ adev->dc_enabled &&
+ adev->mode_info.gpu_vm_support)
+ domain |= AMDGPU_GEM_DOMAIN_GTT;
+#endif
+
+ return domain;
}
-static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
- .destroy = amdgpu_user_framebuffer_destroy,
- .create_handle = amdgpu_user_framebuffer_create_handle,
+static const struct drm_format_info dcc_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
+ .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
+ .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+};
+
+static const struct drm_format_info dcc_retile_formats[] = {
+ { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
+ { .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
+ .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
+ .has_alpha = true, },
+ { .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
+ .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};
-int
-amdgpu_framebuffer_init(struct drm_device *dev,
- struct amdgpu_framebuffer *rfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
+static const struct drm_format_info *
+lookup_format_info(const struct drm_format_info formats[],
+ int num_formats, u32 format)
+{
+ int i;
+
+ for (i = 0; i < num_formats; i++) {
+ if (formats[i].format == format)
+ return &formats[i];
+ }
+
+ return NULL;
+}
+
+const struct drm_format_info *
+amdgpu_lookup_format_info(u32 format, uint64_t modifier)
+{
+ if (!IS_AMD_FMT_MOD(modifier))
+ return NULL;
+
+ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) < AMD_FMT_MOD_TILE_VER_GFX9 ||
+ AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX12)
+ return NULL;
+
+ if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
+ return lookup_format_info(dcc_retile_formats,
+ ARRAY_SIZE(dcc_retile_formats),
+ format);
+
+ if (AMD_FMT_MOD_GET(DCC, modifier))
+ return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),
+ format);
+
+ /* returning NULL will cause the default format structs to be used. */
+ return NULL;
+}
+
+
+/*
+ * Tries to extract the renderable DCC offset from the opaque metadata attached
+ * to the buffer.
+ */
+static int
+extract_render_dcc_offset(struct amdgpu_device *adev,
+ struct drm_gem_object *obj,
+ uint64_t *offset)
+{
+ struct amdgpu_bo *rbo;
+ int r = 0;
+ uint32_t metadata[10]; /* Something that fits a descriptor + header. */
+ uint32_t size;
+
+ rbo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(rbo, false);
+
+ if (unlikely(r)) {
+ /* Don't show error message when returning -ERESTARTSYS */
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Unable to reserve buffer: %d\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_get_metadata(rbo, metadata, sizeof(metadata), &size, NULL);
+ amdgpu_bo_unreserve(rbo);
+
+ if (r)
+ return r;
+
+ /*
+ * The first word is the metadata version, and we need space for at least
+ * the version + pci vendor+device id + 8 words for a descriptor.
+ */
+ if (size < 40 || metadata[0] != 1)
+ return -EINVAL;
+
+ if (adev->family >= AMDGPU_FAMILY_NV) {
+ /* resource word 6/7 META_DATA_ADDRESS{_LO} */
+ *offset = ((u64)metadata[9] << 16u) |
+ ((metadata[8] & 0xFF000000u) >> 16);
+ } else {
+ /* resource word 5/7 META_DATA_ADDRESS */
+ *offset = ((u64)metadata[9] << 8u) |
+ ((u64)(metadata[7] & 0x1FE0000u) << 23);
+ }
+
+ return 0;
+}
+
+static int convert_tiling_flags_to_modifier_gfx12(struct amdgpu_framebuffer *afb)
+{
+ u64 modifier = 0;
+ int swizzle_mode = AMDGPU_TILING_GET(afb->tiling_flags, GFX12_SWIZZLE_MODE);
+
+ if (!swizzle_mode) {
+ modifier = DRM_FORMAT_MOD_LINEAR;
+ } else {
+ int max_comp_block =
+ AMDGPU_TILING_GET(afb->tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
+
+ modifier =
+ AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12) |
+ AMD_FMT_MOD_SET(TILE, swizzle_mode) |
+ AMD_FMT_MOD_SET(DCC, afb->gfx12_dcc) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_comp_block);
+ }
+
+ afb->base.modifier = modifier;
+ afb->base.flags |= DRM_MODE_FB_MODIFIERS;
+ return 0;
+}
+
+static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
+{
+ struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
+ uint64_t modifier = 0;
+ int num_pipes = 0;
+ int num_pkrs = 0;
+
+ num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
+ num_pipes = adev->gfx.config.gb_addr_config_fields.num_pipes;
+
+ if (!afb->tiling_flags || !AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) {
+ modifier = DRM_FORMAT_MOD_LINEAR;
+ } else {
+ int swizzle = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE);
+ bool has_xor = swizzle >= 16;
+ int block_size_bits;
+ int version;
+ int pipe_xor_bits = 0;
+ int bank_xor_bits = 0;
+ int packers = 0;
+ int rb = 0;
+ int pipes = ilog2(num_pipes);
+ uint32_t dcc_offset = AMDGPU_TILING_GET(afb->tiling_flags, DCC_OFFSET_256B);
+
+ switch (swizzle >> 2) {
+ case 0: /* 256B */
+ block_size_bits = 8;
+ break;
+ case 1: /* 4KiB */
+ case 5: /* 4KiB _X */
+ block_size_bits = 12;
+ break;
+ case 2: /* 64KiB */
+ case 4: /* 64 KiB _T */
+ case 6: /* 64 KiB _X */
+ block_size_bits = 16;
+ break;
+ case 7: /* 256 KiB */
+ block_size_bits = 18;
+ break;
+ default:
+ /* RESERVED or VAR */
+ return -EINVAL;
+ }
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0))
+ version = AMD_FMT_MOD_TILE_VER_GFX11;
+ else if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
+ IP_VERSION(10, 3, 0))
+ version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
+ else if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
+ IP_VERSION(10, 0, 0))
+ version = AMD_FMT_MOD_TILE_VER_GFX10;
+ else
+ version = AMD_FMT_MOD_TILE_VER_GFX9;
+
+ switch (swizzle & 3) {
+ case 0: /* Z microtiling */
+ return -EINVAL;
+ case 1: /* S microtiling */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) <
+ IP_VERSION(11, 0, 0)) {
+ if (!has_xor)
+ version = AMD_FMT_MOD_TILE_VER_GFX9;
+ }
+ break;
+ case 2:
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) <
+ IP_VERSION(11, 0, 0)) {
+ if (!has_xor && afb->base.format->cpp[0] != 4)
+ version = AMD_FMT_MOD_TILE_VER_GFX9;
+ }
+ break;
+ case 3:
+ break;
+ }
+
+ if (has_xor) {
+ if (num_pipes == num_pkrs && num_pkrs == 0) {
+ DRM_ERROR("invalid number of pipes and packers\n");
+ return -EINVAL;
+ }
+
+ switch (version) {
+ case AMD_FMT_MOD_TILE_VER_GFX11:
+ pipe_xor_bits = min(block_size_bits - 8, pipes);
+ packers = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
+ break;
+ case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
+ pipe_xor_bits = min(block_size_bits - 8, pipes);
+ packers = min(block_size_bits - 8 - pipe_xor_bits,
+ ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs));
+ break;
+ case AMD_FMT_MOD_TILE_VER_GFX10:
+ pipe_xor_bits = min(block_size_bits - 8, pipes);
+ break;
+ case AMD_FMT_MOD_TILE_VER_GFX9:
+ rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
+ ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
+ pipe_xor_bits = min(block_size_bits - 8, pipes +
+ ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
+ bank_xor_bits = min(block_size_bits - 8 - pipe_xor_bits,
+ ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
+ break;
+ }
+ }
+
+ modifier = AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) |
+ AMD_FMT_MOD_SET(TILE_VERSION, version) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
+ AMD_FMT_MOD_SET(PACKERS, packers);
+
+ if (dcc_offset != 0) {
+ bool dcc_i64b = AMDGPU_TILING_GET(afb->tiling_flags, DCC_INDEPENDENT_64B) != 0;
+ bool dcc_i128b = version >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
+ const struct drm_format_info *format_info;
+ u64 render_dcc_offset;
+
+ /* Enable constant encode on RAVEN2 and later. */
+ bool dcc_constant_encode =
+ (adev->asic_type > CHIP_RAVEN ||
+ (adev->asic_type == CHIP_RAVEN &&
+ adev->external_rev_id >= 0x81)) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) <
+ IP_VERSION(11, 0, 0);
+
+ int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :
+ dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :
+ AMD_FMT_MOD_DCC_BLOCK_256B;
+
+ modifier |= AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, dcc_constant_encode) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, dcc_i64b) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, dcc_i128b) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_cblock_size);
+
+ afb->base.offsets[1] = dcc_offset * 256 + afb->base.offsets[0];
+ afb->base.pitches[1] =
+ AMDGPU_TILING_GET(afb->tiling_flags, DCC_PITCH_MAX) + 1;
+
+ /*
+ * If the userspace driver uses retiling, the tiling flags do not contain
+ * info on the renderable DCC buffer. Luckily the opaque metadata contains
+ * the info, so we can try to extract it. The kernel does not use this info,
+ * but we should convert it to a modifier plane for getfb2, so the
+ * userspace driver that gets it doesn't have to juggle around another DCC
+ * plane internally.
+ */
+ if (extract_render_dcc_offset(adev, afb->base.obj[0],
+ &render_dcc_offset) == 0 &&
+ render_dcc_offset != 0 &&
+ render_dcc_offset != afb->base.offsets[1] &&
+ render_dcc_offset < UINT_MAX) {
+ uint32_t dcc_block_bits; /* of base surface data */
+
+ modifier |= AMD_FMT_MOD_SET(DCC_RETILE, 1);
+ afb->base.offsets[2] = render_dcc_offset;
+
+ if (adev->family >= AMDGPU_FAMILY_NV) {
+ int extra_pipe = 0;
+
+ if ((amdgpu_ip_version(adev, GC_HWIP,
+ 0) >=
+ IP_VERSION(10, 3, 0)) &&
+ pipes == packers && pipes > 1)
+ extra_pipe = 1;
+
+ dcc_block_bits = max(20, 16 + pipes + extra_pipe);
+ } else {
+ modifier |= AMD_FMT_MOD_SET(RB, rb) |
+ AMD_FMT_MOD_SET(PIPE, pipes);
+ dcc_block_bits = max(20, 18 + rb);
+ }
+
+ dcc_block_bits -= ilog2(afb->base.format->cpp[0]);
+ afb->base.pitches[2] = ALIGN(afb->base.width,
+ 1u << ((dcc_block_bits + 1) / 2));
+ }
+ format_info = amdgpu_lookup_format_info(afb->base.format->format,
+ modifier);
+ if (!format_info)
+ return -EINVAL;
+
+ afb->base.format = format_info;
+ }
+ }
+
+ afb->base.modifier = modifier;
+ afb->base.flags |= DRM_MODE_FB_MODIFIERS;
+ return 0;
+}
+
+/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
+static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
+{
+ u64 micro_tile_mode;
+
+ if (AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) == 1) /* LINEAR_ALIGNED */
+ return 0;
+
+ micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
+ switch (micro_tile_mode) {
+ case 0: /* DISPLAY */
+ case 3: /* RENDER */
+ return 0;
+ default:
+ drm_dbg_kms(afb->base.dev,
+ "Micro tile mode %llu not supported for scanout\n",
+ micro_tile_mode);
+ return -EINVAL;
+ }
+}
+
+static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
+ unsigned int *width, unsigned int *height)
+{
+ unsigned int cpp_log2 = ilog2(cpp);
+ unsigned int pixel_log2 = block_log2 - cpp_log2;
+ unsigned int width_log2 = (pixel_log2 + 1) / 2;
+ unsigned int height_log2 = pixel_log2 - width_log2;
+
+ *width = 1 << width_log2;
+ *height = 1 << height_log2;
+}
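As a quick sanity check of that derivation (a stand-alone re-computation of one data point, not driver code): a 64 KiB block of 4-byte pixels comes out as a 128x128 pixel block.

#include <assert.h>

int main(void)
{
	unsigned int block_log2 = 16;				/* 64 KiB block */
	unsigned int cpp_log2 = 2;				/* 4 bytes per pixel */
	unsigned int pixel_log2 = block_log2 - cpp_log2;	/* 16384 pixels */
	unsigned int width_log2 = (pixel_log2 + 1) / 2;	/* 7 */
	unsigned int height_log2 = pixel_log2 - width_log2;	/* 7 */

	assert((1u << width_log2) == 128 && (1u << height_log2) == 128);
	return 0;
}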
+
+static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
+ bool pipe_aligned)
+{
+ unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);
+
+ switch (ver) {
+ case AMD_FMT_MOD_TILE_VER_GFX9: {
+ /*
+ * TODO: for pipe aligned we may need to check the alignment of the
+ * total size of the surface, which may need to be bigger than the
+ * natural alignment due to some HW workarounds
+ */
+ return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
+ }
+ case AMD_FMT_MOD_TILE_VER_GFX10:
+ case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
+ case AMD_FMT_MOD_TILE_VER_GFX11: {
+ int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
+
+ if (ver >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
+ AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
+ ++pipes_log2;
+
+ return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
+ }
+ default:
+ return 0;
+ }
+}
+
+static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
+ const struct drm_format_info *format,
+ unsigned int block_width, unsigned int block_height,
+ unsigned int block_size_log2)
+{
+ unsigned int width = rfb->base.width /
+ ((plane && plane < format->num_planes) ? format->hsub : 1);
+ unsigned int height = rfb->base.height /
+ ((plane && plane < format->num_planes) ? format->vsub : 1);
+ unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
+ unsigned int block_pitch = block_width * cpp;
+ unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
+ unsigned int block_size = 1 << block_size_log2;
+ uint64_t size;
+
+ if (rfb->base.pitches[plane] % block_pitch) {
+ drm_dbg_kms(rfb->base.dev,
+ "pitch %d for plane %d is not a multiple of block pitch %d\n",
+ rfb->base.pitches[plane], plane, block_pitch);
+ return -EINVAL;
+ }
+ if (rfb->base.pitches[plane] < min_pitch) {
+ drm_dbg_kms(rfb->base.dev,
+ "pitch %d for plane %d is less than minimum pitch %d\n",
+ rfb->base.pitches[plane], plane, min_pitch);
+ return -EINVAL;
+ }
+
+ /* Force at least natural alignment. */
+ if (rfb->base.offsets[plane] % block_size) {
+ drm_dbg_kms(rfb->base.dev,
+ "offset 0x%x for plane %d is not a multiple of block pitch 0x%x\n",
+ rfb->base.offsets[plane], plane, block_size);
+ return -EINVAL;
+ }
+
+ size = rfb->base.offsets[plane] +
+ (uint64_t)rfb->base.pitches[plane] / block_pitch *
+ block_size * DIV_ROUND_UP(height, block_height);
+
+ if (rfb->base.obj[0]->size < size) {
+ drm_dbg_kms(rfb->base.dev,
+ "BO size 0x%zx is less than 0x%llx required for plane %d\n",
+ rfb->base.obj[0]->size, size, plane);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
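To put concrete numbers on those checks (illustrative values, recomputed stand-alone rather than by calling the driver): for a linear 1920x1080 XRGB8888 plane, the linear path in the size verification below uses 256-byte blocks, so the minimum pitch is 7680 bytes and the BO must hold at least 7680 * 1080 bytes, roughly 8 MB.

#include <assert.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int width = 1920, height = 1080, cpp = 4;	/* XRGB8888 */
	unsigned int block_width = 256 / cpp, block_height = 1;	/* linear layout */
	unsigned int block_size = 256;
	unsigned int block_pitch = block_width * cpp;
	unsigned int min_pitch = ALIGN_UP(width * cpp, block_pitch);
	unsigned long long min_bo_size =
		(unsigned long long)min_pitch / block_pitch * block_size *
		((height + block_height - 1) / block_height);

	assert(min_pitch == 7680);
	assert(min_bo_size == 7680ULL * 1080);	/* 8294400 bytes */
	return 0;
}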
+
+static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
{
+ const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
+ uint64_t modifier = rfb->base.modifier;
int ret;
- rfb->obj = obj;
- drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
- if (ret) {
- rfb->obj = NULL;
+ unsigned int i, block_width, block_height, block_size_log2;
+
+ if (rfb->base.dev->mode_config.fb_modifiers_not_supported)
+ return 0;
+
+ for (i = 0; i < format_info->num_planes; ++i) {
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+ block_width = 256 / format_info->cpp[i];
+ block_height = 1;
+ block_size_log2 = 8;
+ } else if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX12) {
+ int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
+
+ switch (swizzle) {
+ case AMD_FMT_MOD_TILE_GFX12_256B_2D:
+ block_size_log2 = 8;
+ break;
+ case AMD_FMT_MOD_TILE_GFX12_4K_2D:
+ block_size_log2 = 12;
+ break;
+ case AMD_FMT_MOD_TILE_GFX12_64K_2D:
+ block_size_log2 = 16;
+ break;
+ case AMD_FMT_MOD_TILE_GFX12_256K_2D:
+ block_size_log2 = 18;
+ break;
+ default:
+ drm_dbg_kms(rfb->base.dev,
+ "Gfx12 swizzle mode with unknown block size: %d\n", swizzle);
+ return -EINVAL;
+ }
+
+ get_block_dimensions(block_size_log2, format_info->cpp[i],
+ &block_width, &block_height);
+ } else {
+ int swizzle = AMD_FMT_MOD_GET(TILE, modifier);
+
+ switch ((swizzle & ~3) + 1) {
+ case DC_SW_256B_S:
+ block_size_log2 = 8;
+ break;
+ case DC_SW_4KB_S:
+ case DC_SW_4KB_S_X:
+ block_size_log2 = 12;
+ break;
+ case DC_SW_64KB_S:
+ case DC_SW_64KB_S_T:
+ case DC_SW_64KB_S_X:
+ block_size_log2 = 16;
+ break;
+ case DC_SW_VAR_S_X:
+ block_size_log2 = 18;
+ break;
+ default:
+ drm_dbg_kms(rfb->base.dev,
+ "Swizzle mode with unknown block size: %d\n", swizzle);
+ return -EINVAL;
+ }
+
+ get_block_dimensions(block_size_log2, format_info->cpp[i],
+ &block_width, &block_height);
+ }
+
+ ret = amdgpu_display_verify_plane(rfb, i, format_info,
+ block_width, block_height, block_size_log2);
+ if (ret)
+ return ret;
+ }
+
+ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) <= AMD_FMT_MOD_TILE_VER_GFX11 &&
+ AMD_FMT_MOD_GET(DCC, modifier)) {
+ if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
+ block_size_log2 = get_dcc_block_size(modifier, false, false);
+ get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+ &block_width, &block_height);
+ ret = amdgpu_display_verify_plane(rfb, i, format_info,
+ block_width, block_height,
+ block_size_log2);
+ if (ret)
+ return ret;
+
+ ++i;
+ block_size_log2 = get_dcc_block_size(modifier, true, true);
+ } else {
+ bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);
+
+ block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
+ }
+ get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
+ &block_width, &block_height);
+ ret = amdgpu_display_verify_plane(rfb, i, format_info,
+ block_width, block_height, block_size_log2);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
+ uint64_t *tiling_flags, bool *tmz_surface,
+ bool *gfx12_dcc)
+{
+ struct amdgpu_bo *rbo;
+ int r;
+
+ if (!amdgpu_fb) {
+ *tiling_flags = 0;
+ *tmz_surface = false;
+ *gfx12_dcc = false;
+ return 0;
+ }
+
+ rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
+ r = amdgpu_bo_reserve(rbo, false);
+
+ if (unlikely(r)) {
+ /* Don't show error message when returning -ERESTARTSYS */
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Unable to reserve buffer: %d\n", r);
+ return r;
+ }
+
+ amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
+ *tmz_surface = amdgpu_bo_encrypted(rbo);
+ *gfx12_dcc = rbo->flags & AMDGPU_GEM_CREATE_GFX12_DCC;
+
+ amdgpu_bo_unreserve(rbo);
+
+ return r;
+}
+
+static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
+ struct amdgpu_framebuffer *rfb,
+ struct drm_file *file_priv,
+ const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ rfb->base.obj[0] = obj;
+ drm_helper_mode_fill_fb_struct(dev, &rfb->base, info, mode_cmd);
+ /* Verify that the modifier is supported. */
+ if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
+ mode_cmd->modifier[0])) {
+ drm_dbg_kms(dev,
+ "unsupported pixel format %p4cc / modifier 0x%llx\n",
+ &mode_cmd->pixel_format, mode_cmd->modifier[0]);
+
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
+ if (ret)
+ goto err;
+
+ if (drm_drv_uses_atomic_modeset(dev))
+ ret = drm_framebuffer_init(dev, &rfb->base,
+ &amdgpu_fb_funcs_atomic);
+ else
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
+ rfb->base.obj[0] = NULL;
+ return ret;
+}
+
+static int amdgpu_display_framebuffer_init(struct drm_device *dev,
+ struct amdgpu_framebuffer *rfb,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int ret, i;
+
+ /*
+ * This needs to happen before modifier conversion as that might change
+ * the number of planes.
+ */
+ for (i = 1; i < rfb->base.format->num_planes; ++i) {
+ if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
+ drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. %u\n",
+ i, mode_cmd->handles[0], mode_cmd->handles[i]);
+ ret = -EINVAL;
+ return ret;
+ }
+ }
+
+ ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface,
+ &rfb->gfx12_dcc);
+ if (ret)
+ return ret;
+
+ if (dev->mode_config.fb_modifiers_not_supported && !adev->enable_virtual_display) {
+ drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
+ "GFX9+ requires FB check based on format modifier\n");
+ ret = check_tiling_flags_gfx6(rfb);
+ if (ret)
+ return ret;
+ }
+
+ if (!dev->mode_config.fb_modifiers_not_supported &&
+ !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0))
+ ret = convert_tiling_flags_to_modifier_gfx12(rfb);
+ else
+ ret = convert_tiling_flags_to_modifier(rfb);
+
+ if (ret) {
+ drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier",
+ rfb->tiling_flags);
+ return ret;
+ }
+ }
+
+ ret = amdgpu_display_verify_sizes(rfb);
+ if (ret)
return ret;
+
+ for (i = 0; i < rfb->base.format->num_planes; ++i) {
+ drm_gem_object_get(rfb->base.obj[0]);
+ rfb->base.obj[i] = rfb->base.obj[0];
}
+
return 0;
}
-static struct drm_framebuffer *
-amdgpu_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *file_priv,
- const struct drm_mode_fb_cmd2 *mode_cmd)
+struct drm_framebuffer *
+amdgpu_display_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_gem_object *obj;
struct amdgpu_framebuffer *amdgpu_fb;
+ struct drm_gem_object *obj;
+ struct amdgpu_bo *bo;
+ uint32_t domains;
int ret;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
- dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
- "can't create framebuffer\n", mode_cmd->handles[0]);
+ drm_dbg_kms(dev,
+ "No GEM object associated to handle 0x%08X, can't create framebuffer\n",
+ mode_cmd->handles[0]);
+
return ERR_PTR(-ENOENT);
}
/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
- if (obj->import_attach) {
- DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ bo = gem_to_amdgpu_bo(obj);
+ domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
+ if (drm_gem_is_imported(obj) && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
+ drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
+ drm_gem_object_put(obj);
return ERR_PTR(-EINVAL);
}
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(-ENOMEM);
}
- ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
+ ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
+ info, mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(ret);
}
+ drm_gem_object_put(obj);
return &amdgpu_fb->base;
}
-static void amdgpu_output_poll_changed(struct drm_device *dev)
-{
- struct amdgpu_device *adev = dev->dev_private;
- amdgpu_fb_output_poll_changed(adev);
-}
-
const struct drm_mode_config_funcs amdgpu_mode_funcs = {
- .fb_create = amdgpu_user_framebuffer_create,
- .output_poll_changed = amdgpu_output_poll_changed
+ .fb_create = amdgpu_display_user_framebuffer_create,
};
-static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
-{ { UNDERSCAN_OFF, "off" },
+static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = {
+ { UNDERSCAN_OFF, "off" },
{ UNDERSCAN_ON, "on" },
{ UNDERSCAN_AUTO, "auto" },
};
-static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
-{ { AMDGPU_AUDIO_DISABLE, "off" },
+static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = {
+ { AMDGPU_AUDIO_DISABLE, "off" },
{ AMDGPU_AUDIO_ENABLE, "on" },
{ AMDGPU_AUDIO_AUTO, "auto" },
};
/* XXX support different dither options? spatial, temporal, both, etc. */
-static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
-{ { AMDGPU_FMT_DITHER_DISABLE, "off" },
+static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
+ { AMDGPU_FMT_DITHER_DISABLE, "off" },
{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};
-int amdgpu_modeset_create_props(struct amdgpu_device *adev)
+int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
int sz;
adev->mode_info.coherent_mode_property =
- drm_property_create_range(adev->ddev, 0 , "coherent", 0, 1);
+ drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
if (!adev->mode_info.coherent_mode_property)
return -ENOMEM;
adev->mode_info.load_detect_property =
- drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
+ drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
if (!adev->mode_info.load_detect_property)
return -ENOMEM;
- drm_mode_create_scaling_mode_property(adev->ddev);
+ drm_mode_create_scaling_mode_property(adev_to_drm(adev));
sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
adev->mode_info.underscan_property =
- drm_property_create_enum(adev->ddev, 0,
- "underscan",
- amdgpu_underscan_enum_list, sz);
+ drm_property_create_enum(adev_to_drm(adev), 0,
+ "underscan",
+ amdgpu_underscan_enum_list, sz);
adev->mode_info.underscan_hborder_property =
- drm_property_create_range(adev->ddev, 0,
- "underscan hborder", 0, 128);
+ drm_property_create_range(adev_to_drm(adev), 0,
+ "underscan hborder", 0, 128);
if (!adev->mode_info.underscan_hborder_property)
return -ENOMEM;
adev->mode_info.underscan_vborder_property =
- drm_property_create_range(adev->ddev, 0,
- "underscan vborder", 0, 128);
+ drm_property_create_range(adev_to_drm(adev), 0,
+ "underscan vborder", 0, 128);
if (!adev->mode_info.underscan_vborder_property)
return -ENOMEM;
sz = ARRAY_SIZE(amdgpu_audio_enum_list);
adev->mode_info.audio_property =
- drm_property_create_enum(adev->ddev, 0,
+ drm_property_create_enum(adev_to_drm(adev), 0,
"audio",
amdgpu_audio_enum_list, sz);
sz = ARRAY_SIZE(amdgpu_dither_enum_list);
adev->mode_info.dither_property =
- drm_property_create_enum(adev->ddev, 0,
+ drm_property_create_enum(adev_to_drm(adev), 0,
"dither",
amdgpu_dither_enum_list, sz);
return 0;
}
-void amdgpu_update_display_priority(struct amdgpu_device *adev)
+void amdgpu_display_update_priority(struct amdgpu_device *adev)
{
/* adjustment options for the display watermarks */
if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
@@ -644,7 +1424,7 @@ void amdgpu_update_display_priority(struct amdgpu_device *adev)
}
-static bool is_hdtv_mode(const struct drm_display_mode *mode)
+static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
{
/* try and guess if this is a tv or a monitor */
if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
@@ -656,16 +1436,15 @@ static bool is_hdtv_mode(const struct drm_display_mode *mode)
return false;
}
-bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_encoder *amdgpu_encoder;
struct drm_connector *connector;
- struct amdgpu_connector *amdgpu_connector;
u32 src_v = 1, dst_v = 1;
u32 src_h = 1, dst_h = 1;
@@ -677,7 +1456,6 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
continue;
amdgpu_encoder = to_amdgpu_encoder(encoder);
connector = amdgpu_get_connector_for_encoder(encoder);
- amdgpu_connector = to_amdgpu_connector(connector);
/* set scaling */
if (amdgpu_encoder->rmx_type == RMX_OFF)
@@ -700,8 +1478,8 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
- drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
- is_hdtv_mode(mode)))) {
+ connector && connector->display_info.is_hdmi &&
+ amdgpu_display_is_hdtv_mode(mode)))) {
if (amdgpu_encoder->underscan_hborder != 0)
amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
else
@@ -719,6 +1497,7 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
}
if (amdgpu_crtc->rmx_type != RMX_OFF) {
fixed20_12 a, b;
+
a.full = dfixed_const(src_v);
b.full = dfixed_const(dst_v);
amdgpu_crtc->vsc.full = dfixed_div(a, b);
@@ -738,7 +1517,7 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
*
* \param dev Device to query.
* \param pipe Crtc to query.
- * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
+ * \param flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
* For driver internal use only also supports these flags:
*
* USE_REAL_VBLANKSTART to use the real start of vblank instead
@@ -769,16 +1548,16 @@ bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
* unknown small number of scanlines wrt. real scanout position.
*
*/
-int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- unsigned int flags, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
+ unsigned int pipe, unsigned int flags, int *vpos,
+ int *hpos, ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
u32 vbl = 0, position = 0;
int vbl_start, vbl_end, vtotal, ret = 0;
bool in_vbl = true;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -805,8 +1584,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
ret |= DRM_SCANOUTPOS_ACCURATE;
vbl_start = vbl & 0x1fff;
vbl_end = (vbl >> 16) & 0x1fff;
- }
- else {
+ } else {
/* No: Fake something reasonable which gives at least ok results. */
vbl_start = mode->crtc_vdisplay;
vbl_end = 0;
@@ -814,8 +1592,8 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
/* Called from driver internal vblank counter query code? */
if (flags & GET_DISTANCE_TO_VBLANKSTART) {
- /* Caller wants distance from real vbl_start in *hpos */
- *hpos = *vpos - vbl_start;
+ /* Caller wants distance from real vbl_start in *hpos */
+ *hpos = *vpos - vbl_start;
}
/* Fudge vblank to start a few scanlines earlier to handle the
@@ -837,7 +1615,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
/* In vblank? */
if (in_vbl)
- ret |= DRM_SCANOUTPOS_IN_VBLANK;
+ ret |= DRM_SCANOUTPOS_IN_VBLANK;
/* Called from driver internal vblank counter query code? */
if (flags & GET_DISTANCE_TO_VBLANKSTART) {
@@ -855,7 +1633,12 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
/* Inside "upper part" of vblank area? Apply corrective offset if so: */
if (in_vbl && (*vpos >= vbl_start)) {
vtotal = mode->crtc_vtotal;
- *vpos = *vpos - vtotal;
+
+ /* With variable refresh rate displays the vpos can exceed
+ * the vtotal value. Clamp to 0 to return -vbl_end instead
+ * of guessing the remaining number of lines until scanout.
+ */
+ *vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
}
/* Correct for shifted end of vbl at vbl_end. */
@@ -864,7 +1647,7 @@ int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
return ret;
}
-int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
+int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
return AMDGPU_CRTC_IRQ_NONE;
@@ -886,3 +1669,204 @@ int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
return AMDGPU_CRTC_IRQ_NONE;
}
}
+
+bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos,
+ int *hpos, ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+
+ return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
+ stime, etime, mode);
+}
+
+static bool
+amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_fb_helper *fb_helper = dev->fb_helper;
+
+ if (!fb_helper || !fb_helper->buffer)
+ return false;
+
+ if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj)
+ return false;
+
+ return true;
+}
+
+int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ int r;
+
+ drm_kms_helper_poll_disable(dev);
+
+ /* turn off display hw */
+ drm_modeset_lock_all(dev);
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_OFF);
+ drm_connector_list_iter_end(&iter);
+ drm_modeset_unlock_all(dev);
+ /* unpin the front buffers and cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct amdgpu_bo *robj;
+
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
+ }
+
+ if (!fb || !fb->obj[0])
+ continue;
+
+ robj = gem_to_amdgpu_bo(fb->obj[0]);
+ if (!amdgpu_display_robj_is_fb(adev, robj)) {
+ r = amdgpu_bo_reserve(robj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(robj);
+ amdgpu_bo_unreserve(robj);
+ }
+ }
+ }
+ return 0;
+}
+
+int amdgpu_display_resume_helper(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct drm_crtc *crtc;
+ int r;
+
+ /* pin cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r != 0)
+ dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
+ }
+ }
+
+ drm_helper_resume_force_mode(dev);
+
+ /* turn on display hw */
+ drm_modeset_lock_all(dev);
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ drm_helper_connector_dpms(connector,
+ DRM_MODE_DPMS_ON);
+ drm_connector_list_iter_end(&iter);
+
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_poll_enable(dev);
+
+ return 0;
+}
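
Editor's note: these two helpers are meant to bracket the display hardware across a suspend/resume cycle. A minimal, hedged sketch of a caller that already holds the device pointer; the function names below are illustrative placeholders and are not part of this patch:

static int example_display_hw_suspend(struct amdgpu_device *adev)
{
	/* DPMS off, stop polling, unpin front buffers and cursors */
	return amdgpu_display_suspend_helper(adev);
}

static int example_display_hw_resume(struct amdgpu_device *adev)
{
	/* re-pin cursors, force modes back on, DPMS on, resume polling */
	return amdgpu_display_resume_helper(adev);
}
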
+
+/* panic_abo is set in amdgpu_display_get_scanout_buffer() and only used in amdgpu_display_set_pixel();
+ * both are called from the panic handler and are protected by the drm_panic spinlock.
+ */
+static struct amdgpu_bo *panic_abo;
+
+/* Use the indirect MMIO to write each pixel to the GPU VRAM.
+ * This is a simplified version of amdgpu_device_mm_access().
+ */
+static void amdgpu_display_set_pixel(struct drm_scanout_buffer *sb,
+ unsigned int x,
+ unsigned int y,
+ u32 color)
+{
+ struct amdgpu_res_cursor cursor;
+ unsigned long offset;
+ struct amdgpu_bo *abo = panic_abo;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ uint32_t tmp;
+
+ offset = x * 4 + y * sb->pitch[0];
+ amdgpu_res_first(abo->tbo.resource, offset, 4, &cursor);
+
+ tmp = cursor.start >> 31;
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t) cursor.start) | 0x80000000);
+ if (tmp != 0xffffffff)
+ WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+ WREG32_NO_KIQ(mmMM_DATA, color);
+}
+
+int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct amdgpu_bo *abo;
+ struct drm_framebuffer *fb = plane->state->fb;
+
+ if (!fb)
+ return -EINVAL;
+
+ DRM_DEBUG_KMS("Framebuffer %dx%d %p4cc\n", fb->width, fb->height, &fb->format->format);
+
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
+ if (!abo)
+ return -EINVAL;
+
+ sb->width = fb->width;
+ sb->height = fb->height;
+ /* Use the generic linear format, because tiling will be disabled in panic_flush() */
+ sb->format = drm_format_info(fb->format->format);
+ if (!sb->format)
+ return -EINVAL;
+
+ sb->pitch[0] = fb->pitches[0];
+
+ if (abo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) {
+ if (abo->tbo.resource->mem_type != TTM_PL_VRAM) {
+ drm_warn(plane->dev, "amdgpu panic, framebuffer not in VRAM\n");
+ return -EINVAL;
+ }
+		/* Only handle 32-bit formats, to simplify MMIO access */
+ if (fb->format->cpp[0] != 4) {
+ drm_warn(plane->dev, "amdgpu panic, pixel format is not 32bits\n");
+ return -EINVAL;
+ }
+ sb->set_pixel = amdgpu_display_set_pixel;
+ panic_abo = abo;
+ return 0;
+ }
+ if (!abo->kmap.virtual &&
+ ttm_bo_kmap(&abo->tbo, 0, PFN_UP(abo->tbo.base.size), &abo->kmap)) {
+ drm_warn(plane->dev, "amdgpu bo map failed, panic won't be displayed\n");
+ return -ENOMEM;
+ }
+ if (abo->kmap.bo_kmap_type & TTM_BO_MAP_IOMEM_MASK)
+ iosys_map_set_vaddr_iomem(&sb->map[0], abo->kmap.virtual);
+ else
+ iosys_map_set_vaddr(&sb->map[0], abo->kmap.virtual);
+
+ return 0;
+}
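
Editor's note: amdgpu_display_get_scanout_buffer() and the set_pixel callback it installs are the hooks the drm_panic infrastructure uses to draw the panic screen. A hedged sketch of how a plane might expose the hook through its helper funcs is below, assuming the drm_plane_helper_funcs::get_scanout_buffer member from the drm_panic support; the initializer and its elided members are placeholders, not taken from this patch:

static const struct drm_plane_helper_funcs example_primary_plane_helper_funcs = {
	/* ...atomic_check/atomic_update callbacks elided... */
	.get_scanout_buffer = amdgpu_display_get_scanout_buffer,
};
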
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
new file mode 100644
index 000000000000..930c171473b4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_DISPLAY_H__
+#define __AMDGPU_DISPLAY_H__
+
+#include <drm/drm_panic.h>
+
+#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
+#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
+#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
+#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
+#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
+#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
+#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
+#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
+#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
+#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
+#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
+
+void amdgpu_display_hotplug_work_func(struct work_struct *work);
+void amdgpu_display_update_priority(struct amdgpu_device *adev);
+uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
+ uint64_t bo_flags);
+struct drm_framebuffer *
+amdgpu_display_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ const struct drm_format_info *info,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
+const struct drm_format_info *
+amdgpu_lookup_format_info(u32 format, uint64_t modifier);
+
+int amdgpu_display_suspend_helper(struct amdgpu_device *adev);
+int amdgpu_display_resume_helper(struct amdgpu_device *adev);
+
+int amdgpu_display_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
new file mode 100644
index 000000000000..8561ad7f6180
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -0,0 +1,574 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * based on nouveau_prime.c
+ *
+ * Authors: Alex Deucher
+ */
+
+/**
+ * DOC: PRIME Buffer Sharing
+ *
+ * The following callback implementations are used for :ref:`sharing GEM buffer
+ * objects between different devices via PRIME <prime_buffer_sharing>`.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_display.h"
+#include "amdgpu_gem.h"
+#include "amdgpu_dma_buf.h"
+#include "amdgpu_xgmi.h"
+#include "amdgpu_vm.h"
+#include <drm/amdgpu_drm.h>
+#include <drm/ttm/ttm_tt.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-fence-array.h>
+#include <linux/pci-p2pdma.h>
+
+static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops;
+
+/**
+ * dma_buf_attach_adev - Helper to get adev of an attachment
+ *
+ * @attach: attachment
+ *
+ * Returns:
+ * A struct amdgpu_device * if the attaching device is an amdgpu device or
+ * partition, NULL otherwise.
+ */
+static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach)
+{
+ if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) {
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ return amdgpu_ttm_adev(bo->tbo.bdev);
+ }
+
+ return NULL;
+}
+
+/**
+ * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
+ *
+ * @dmabuf: DMA-buf we are attaching to
+ * @attach: attachment to add
+ *
+ * Add the attachment as user to the exported DMA-buf.
+ */
+static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attach)
+{
+ struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach);
+ struct drm_gem_object *obj = dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
+ pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
+ attach->peer2peer = false;
+
+ amdgpu_vm_bo_update_shared(bo);
+
+ return 0;
+}
+
+/**
+ * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
+ *
+ * @attach: attachment to pin down
+ *
+ * Pin the BO which is backing the DMA-buf so that it can't move any more.
+ */
+static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
+{
+ struct dma_buf *dmabuf = attach->dmabuf;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(dmabuf->priv);
+ u32 domains = bo->allowed_domains;
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ /* Try pinning into VRAM to allow P2P with RDMA NICs without ODP
+ * support if all attachments can do P2P. If any attachment can't do
+ * P2P just pin into GTT instead.
+ *
+	 * To avoid conflicting pinnings between GPUs and RDMA when move
+	 * notifiers are disabled, only allow pinning in VRAM when move
+	 * notifiers are enabled.
+ */
+ if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+ domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
+ } else {
+ list_for_each_entry(attach, &dmabuf->attachments, node)
+ if (!attach->peer2peer)
+ domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
+ }
+
+ if (domains & AMDGPU_GEM_DOMAIN_VRAM)
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
+ if (WARN_ON(!domains))
+ return -EINVAL;
+
+ return amdgpu_bo_pin(bo, domains);
+}
+
+/**
+ * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
+ *
+ * @attach: attachment to unpin
+ *
+ * Unpin a previously pinned BO to make it movable again.
+ */
+static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ amdgpu_bo_unpin(bo);
+}
+
+/**
+ * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
+ * @attach: DMA-buf attachment
+ * @dir: DMA direction
+ *
+ * Makes sure that the shared DMA buffer can be accessed by the target device.
+ * For now, simply pins it to the GTT domain, where it should be accessible by
+ * all DMA devices.
+ *
+ * Returns:
+ * sg_table filled with the DMA addresses to use or ERR_PTR with a negative error
+ * code.
+ */
+static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct dma_buf *dma_buf = attach->dmabuf;
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct sg_table *sgt;
+ long r;
+
+ if (!bo->tbo.pin_count) {
+ /* move buffer into GTT or VRAM */
+ struct ttm_operation_ctx ctx = { false, false };
+ unsigned int domains = AMDGPU_GEM_DOMAIN_GTT;
+
+ if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
+ attach->peer2peer) {
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ domains |= AMDGPU_GEM_DOMAIN_VRAM;
+ }
+ amdgpu_bo_placement_from_domain(bo, domains);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ return ERR_PTR(r);
+ }
+
+ switch (bo->tbo.resource->mem_type) {
+ case TTM_PL_TT:
+ sgt = drm_prime_pages_to_sg(obj->dev,
+ bo->tbo.ttm->pages,
+ bo->tbo.ttm->num_pages);
+ if (IS_ERR(sgt))
+ return sgt;
+
+ if (dma_map_sgtable(attach->dev, sgt, dir,
+ DMA_ATTR_SKIP_CPU_SYNC))
+ goto error_free;
+ break;
+
+ case TTM_PL_VRAM:
+ /* XGMI-accessible memory should never be DMA-mapped */
+ if (WARN_ON(amdgpu_dmabuf_is_xgmi_accessible(
+ dma_buf_attach_adev(attach), bo)))
+ return ERR_PTR(-EINVAL);
+
+ r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
+ bo->tbo.base.size, attach->dev,
+ dir, &sgt);
+ if (r)
+ return ERR_PTR(r);
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ return sgt;
+
+error_free:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return ERR_PTR(-EBUSY);
+}
+
+/**
+ * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
+ * @attach: DMA-buf attachment
+ * @sgt: sg_table to unmap
+ * @dir: DMA direction
+ *
+ * This is called when a shared DMA buffer no longer needs to be accessible by
+ * another device. For now, simply unpins the buffer from GTT.
+ */
+static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ if (sg_page(sgt->sgl)) {
+ dma_unmap_sgtable(attach->dev, sgt, dir, 0);
+ sg_free_table(sgt);
+ kfree(sgt);
+ } else {
+ amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt);
+ }
+}
+
+/**
+ * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
+ * @dma_buf: Shared DMA buffer
+ * @direction: Direction of DMA transfer
+ *
+ * This is called before CPU access to the shared DMA buffer's memory. If it's
+ * a read access, the buffer is moved to the GTT domain if possible, for optimal
+ * CPU read performance.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
+ enum dma_data_direction direction)
+{
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { true, false };
+ u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
+ int ret;
+ bool reads = (direction == DMA_BIDIRECTIONAL ||
+ direction == DMA_FROM_DEVICE);
+
+ if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
+ return 0;
+
+ /* move to gtt */
+ ret = amdgpu_bo_reserve(bo, false);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (!bo->tbo.pin_count &&
+ (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ }
+
+ amdgpu_bo_unreserve(bo);
+ return ret;
+}
+
+static int amdgpu_dma_buf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ int ret;
+
+ /*
+ * Pin to keep buffer in place while it's vmap'ed. The actual
+	 * domain is not that important as long as it's mappable. Using
+ * GTT and VRAM should be compatible with most use cases.
+ */
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM);
+ if (ret)
+ return ret;
+ ret = drm_gem_dmabuf_vmap(dma_buf, map);
+ if (ret)
+ amdgpu_bo_unpin(bo);
+
+ return ret;
+}
+
+static void amdgpu_dma_buf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ drm_gem_dmabuf_vunmap(dma_buf, map);
+ amdgpu_bo_unpin(bo);
+}
+
+const struct dma_buf_ops amdgpu_dmabuf_ops = {
+ .attach = amdgpu_dma_buf_attach,
+ .pin = amdgpu_dma_buf_pin,
+ .unpin = amdgpu_dma_buf_unpin,
+ .map_dma_buf = amdgpu_dma_buf_map,
+ .unmap_dma_buf = amdgpu_dma_buf_unmap,
+ .release = drm_gem_dmabuf_release,
+ .begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
+ .mmap = drm_gem_dmabuf_mmap,
+ .vmap = amdgpu_dma_buf_vmap,
+ .vunmap = amdgpu_dma_buf_vunmap,
+};
+
+/**
+ * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
+ * @gobj: GEM BO
+ * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
+ *
+ * The main work is done by the &drm_gem_prime_export helper.
+ *
+ * Returns:
+ * Shared DMA buffer representing the GEM BO from the given device.
+ */
+struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
+ int flags)
+{
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct dma_buf *buf;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true,
+		/* We opt to avoid OOM on system page allocations */
+ .gfp_retry_mayfail = true,
+ .allow_res_evict = false,
+ };
+ int ret;
+
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
+ bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+ return ERR_PTR(-EPERM);
+
+ ret = ttm_bo_setup_export(&bo->tbo, &ctx);
+ if (ret)
+ return ERR_PTR(ret);
+
+ buf = drm_gem_prime_export(gobj, flags);
+ if (!IS_ERR(buf))
+ buf->ops = &amdgpu_dmabuf_ops;
+
+ return buf;
+}
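
Editor's note: the export path above is what ultimately backs the PRIME handle-to-FD ioctl. A minimal userspace sketch with libdrm, assuming drm_fd is an open render node and gem_handle is a valid BO handle (error handling trimmed for brevity):

#include <stdint.h>
#include <xf86drm.h>

/* Turn a GEM handle into a shareable dma-buf file descriptor. */
int example_export_bo(int drm_fd, uint32_t gem_handle)
{
	int dmabuf_fd = -1;

	if (drmPrimeHandleToFD(drm_fd, gem_handle, DRM_CLOEXEC | DRM_RDWR,
			       &dmabuf_fd))
		return -1;

	return dmabuf_fd;
}
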
+
+/**
+ * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
+ *
+ * @dev: DRM device
+ * @dma_buf: DMA-buf
+ *
+ * Creates an empty SG BO for DMA-buf import.
+ *
+ * Returns:
+ * A new GEM BO of the given DRM device, representing the memory
+ * described by the given DMA-buf attachment and scatter/gather table.
+ */
+static struct drm_gem_object *
+amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
+{
+ struct dma_resv *resv = dma_buf->resv;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct drm_gem_object *gobj;
+ struct amdgpu_bo *bo;
+ uint64_t flags = 0;
+ int ret;
+
+ dma_resv_lock(resv, NULL);
+
+ if (dma_buf->ops == &amdgpu_dmabuf_ops) {
+ struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);
+
+ flags |= other->flags & (AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_COHERENT |
+ AMDGPU_GEM_CREATE_EXT_COHERENT |
+ AMDGPU_GEM_CREATE_UNCACHED);
+ }
+
+ ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_CPU, flags,
+ ttm_bo_type_sg, resv, &gobj, 0);
+ if (ret)
+ goto error;
+
+ bo = gem_to_amdgpu_bo(gobj);
+ bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+ bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
+
+ dma_resv_unlock(resv);
+ return gobj;
+
+error:
+ dma_resv_unlock(resv);
+ return ERR_PTR(ret);
+}
+
+/**
+ * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
+ *
+ * @attach: the DMA-buf attachment
+ *
+ * Invalidate the DMA-buf attachment, making sure that we re-create the
+ * mapping before the next use.
+ */
+static void
+amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_placement placement = {};
+ struct amdgpu_vm_bo_base *bo_base;
+ int r;
+
+ /* FIXME: This should be after the "if", but needs a fix to make sure
+ * DMABuf imports are initialized in the right VM list.
+ */
+ amdgpu_vm_bo_invalidate(bo, false);
+ if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
+ return;
+
+ r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
+ if (r) {
+ DRM_ERROR("Failed to invalidate DMA-buf import (%d))\n", r);
+ return;
+ }
+
+ for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+ struct amdgpu_vm *vm = bo_base->vm;
+ struct dma_resv *resv = vm->root.bo->tbo.base.resv;
+
+ if (ticket) {
+ /* When we get an error here it means that somebody
+			 * else is holding the VM lock and updating page tables,
+			 * so we can just continue here.
+ */
+ r = dma_resv_lock(resv, ticket);
+ if (r)
+ continue;
+
+ } else {
+ /* TODO: This is more problematic and we actually need
+			 * to allow page table updates without holding the
+ * lock.
+ */
+ if (!dma_resv_trylock(resv))
+ continue;
+ }
+
+ /* Reserve fences for two SDMA page table updates */
+ r = dma_resv_reserve_fences(resv, 2);
+ if (!r)
+ r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ if (!r)
+ r = amdgpu_vm_handle_moved(adev, vm, ticket);
+
+ if (r && r != -EBUSY)
+ DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
+ r);
+
+ dma_resv_unlock(resv);
+ }
+}
+
+static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
+ .allow_peer2peer = true,
+ .move_notify = amdgpu_dma_buf_move_notify
+};
+
+/**
+ * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
+ * @dev: DRM device
+ * @dma_buf: Shared DMA buffer
+ *
+ * Import a dma_buf into the driver and potentially create a new GEM object.
+ *
+ * Returns:
+ * GEM BO representing the shared DMA buffer for the given device.
+ */
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct drm_gem_object *obj;
+
+ if (dma_buf->ops == &amdgpu_dmabuf_ops) {
+ obj = dma_buf->priv;
+ if (obj->dev == dev) {
+ /*
+			 * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ drm_gem_object_get(obj);
+ return obj;
+ }
+ }
+
+ obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
+ if (IS_ERR(obj))
+ return obj;
+
+ attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
+ &amdgpu_dma_buf_attach_ops, obj);
+ if (IS_ERR(attach)) {
+ drm_gem_object_put(obj);
+ return ERR_CAST(attach);
+ }
+
+ get_dma_buf(dma_buf);
+ obj->import_attach = attach;
+ return obj;
+}
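
Editor's note: the matching import direction, which lands in amdgpu_gem_prime_import() above, is driven from userspace by the FD-to-handle ioctl. A hedged libdrm sketch, assuming the fd was produced by another device or process:

#include <stdint.h>
#include <xf86drm.h>

/* Import a dma-buf fd as a GEM handle on this device. */
int example_import_bo(int drm_fd, int dmabuf_fd, uint32_t *gem_handle)
{
	return drmPrimeFDToHandle(drm_fd, dmabuf_fd, gem_handle);
}
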
+
+/**
+ * amdgpu_dmabuf_is_xgmi_accessible - Check if XGMI is available for P2P transfer
+ *
+ * @adev: amdgpu_device pointer of the importer
+ * @bo: amdgpu buffer object
+ *
+ * Returns:
+ * True if dmabuf accessible over xgmi, false otherwise.
+ */
+bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo)
+{
+ struct drm_gem_object *obj = &bo->tbo.base;
+ struct drm_gem_object *gobj;
+
+ if (!adev)
+ return false;
+
+ if (drm_gem_is_imported(obj)) {
+ struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+
+ if (dma_buf->ops != &amdgpu_dmabuf_ops)
+ /* No XGMI with non AMD GPUs */
+ return false;
+
+ gobj = dma_buf->priv;
+ bo = gem_to_amdgpu_bo(gobj);
+ }
+
+ if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
+ (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
+ return true;
+
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
new file mode 100644
index 000000000000..3e93b9b407a9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_DMA_BUF_H__
+#define __AMDGPU_DMA_BUF_H__
+
+#include <drm/drm_gem.h>
+
+struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
+ int flags);
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo);
+
+extern const struct dma_buf_ops amdgpu_dmabuf_ops;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
new file mode 100644
index 000000000000..2675689ef70f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_DOORBELL_H
+#define AMDGPU_DOORBELL_H
+
+/*
+ * GPU doorbell structures, functions & helpers
+ */
+struct amdgpu_doorbell {
+ /* doorbell mmio */
+ resource_size_t base;
+ resource_size_t size;
+
+ /* Number of doorbells reserved for amdgpu kernel driver */
+ u32 num_kernel_doorbells;
+
+ /* Kernel doorbells */
+ struct amdgpu_bo *kernel_doorbells;
+
+ /* For CPU access of doorbells */
+ uint32_t *cpu_addr;
+};
+
+/* Reserved doorbells for amdgpu (including multimedia).
+ * KFD can use all the rest in the 2M doorbell bar.
+ * For ASICs before vega10, doorbells are 32-bit, so the
+ * index/offset is in dwords. For vega10 and later, doorbells
+ * can be 64-bit, so the index is defined in qwords.
+ */
+struct amdgpu_doorbell_index {
+ uint32_t kiq;
+ uint32_t mec_ring0;
+ uint32_t mec_ring1;
+ uint32_t mec_ring2;
+ uint32_t mec_ring3;
+ uint32_t mec_ring4;
+ uint32_t mec_ring5;
+ uint32_t mec_ring6;
+ uint32_t mec_ring7;
+ uint32_t userqueue_start;
+ uint32_t userqueue_end;
+ uint32_t gfx_ring0;
+ uint32_t gfx_ring1;
+ uint32_t gfx_userqueue_start;
+ uint32_t gfx_userqueue_end;
+ uint32_t sdma_engine[16];
+ uint32_t mes_ring0;
+ uint32_t mes_ring1;
+ uint32_t ih;
+ union {
+ struct {
+ uint32_t vcn_ring0_1;
+ uint32_t vcn_ring2_3;
+ uint32_t vcn_ring4_5;
+ uint32_t vcn_ring6_7;
+ } vcn;
+ struct {
+ uint32_t uvd_ring0_1;
+ uint32_t uvd_ring2_3;
+ uint32_t uvd_ring4_5;
+ uint32_t uvd_ring6_7;
+ uint32_t vce_ring0_1;
+ uint32_t vce_ring2_3;
+ uint32_t vce_ring4_5;
+ uint32_t vce_ring6_7;
+ } uvd_vce;
+ };
+ uint32_t vpe_ring;
+ uint32_t first_non_cp;
+ uint32_t last_non_cp;
+ uint32_t max_assignment;
+ /* Per engine SDMA doorbell size in dword */
+ uint32_t sdma_doorbell_range;
+ /* Per xcc doorbell size for KIQ/KCQ */
+ uint32_t xcc_doorbell_range;
+};
+
+enum AMDGPU_DOORBELL_ASSIGNMENT {
+ AMDGPU_DOORBELL_KIQ = 0x000,
+ AMDGPU_DOORBELL_HIQ = 0x001,
+ AMDGPU_DOORBELL_DIQ = 0x002,
+ AMDGPU_DOORBELL_MEC_RING0 = 0x010,
+ AMDGPU_DOORBELL_MEC_RING1 = 0x011,
+ AMDGPU_DOORBELL_MEC_RING2 = 0x012,
+ AMDGPU_DOORBELL_MEC_RING3 = 0x013,
+ AMDGPU_DOORBELL_MEC_RING4 = 0x014,
+ AMDGPU_DOORBELL_MEC_RING5 = 0x015,
+ AMDGPU_DOORBELL_MEC_RING6 = 0x016,
+ AMDGPU_DOORBELL_MEC_RING7 = 0x017,
+ AMDGPU_DOORBELL_GFX_RING0 = 0x020,
+ AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
+ AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
+ AMDGPU_DOORBELL_IH = 0x1E8,
+ AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
+ AMDGPU_DOORBELL_INVALID = 0xFFFF
+};
+
+enum AMDGPU_VEGA20_DOORBELL_ASSIGNMENT {
+
+ /* Compute + GFX: 0~255 */
+ AMDGPU_VEGA20_DOORBELL_KIQ = 0x000,
+ AMDGPU_VEGA20_DOORBELL_HIQ = 0x001,
+ AMDGPU_VEGA20_DOORBELL_DIQ = 0x002,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING0 = 0x003,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING1 = 0x004,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING2 = 0x005,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING3 = 0x006,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING4 = 0x007,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING5 = 0x008,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING6 = 0x009,
+ AMDGPU_VEGA20_DOORBELL_MEC_RING7 = 0x00A,
+ AMDGPU_VEGA20_DOORBELL_USERQUEUE_START = 0x00B,
+ AMDGPU_VEGA20_DOORBELL_USERQUEUE_END = 0x08A,
+ AMDGPU_VEGA20_DOORBELL_GFX_RING0 = 0x08B,
+ /* SDMA:256~335*/
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0 = 0x100,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE1 = 0x10A,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE2 = 0x114,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE3 = 0x11E,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE4 = 0x128,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE5 = 0x132,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE6 = 0x13C,
+ AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE7 = 0x146,
+ /* IH: 376~391 */
+ AMDGPU_VEGA20_DOORBELL_IH = 0x178,
+ /* MMSCH: 392~407
+	 * overlap the doorbell assignment with VCN as they are mutually exclusive.
+	 * The VCN engine's doorbell is 32-bit and two VCN rings share one QWORD.
+ */
+	AMDGPU_VEGA20_DOORBELL64_VCN0_1 = 0x188, /* VCN0 */
+ AMDGPU_VEGA20_DOORBELL64_VCN2_3 = 0x189,
+ AMDGPU_VEGA20_DOORBELL64_VCN4_5 = 0x18A,
+ AMDGPU_VEGA20_DOORBELL64_VCN6_7 = 0x18B,
+
+	AMDGPU_VEGA20_DOORBELL64_VCN8_9 = 0x18C, /* VCN1 */
+ AMDGPU_VEGA20_DOORBELL64_VCNa_b = 0x18D,
+ AMDGPU_VEGA20_DOORBELL64_VCNc_d = 0x18E,
+ AMDGPU_VEGA20_DOORBELL64_VCNe_f = 0x18F,
+
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING0_1 = 0x188,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING2_3 = 0x189,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING4_5 = 0x18A,
+ AMDGPU_VEGA20_DOORBELL64_UVD_RING6_7 = 0x18B,
+
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING0_1 = 0x18C,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING2_3 = 0x18D,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING4_5 = 0x18E,
+ AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7 = 0x18F,
+
+ AMDGPU_VEGA20_DOORBELL64_FIRST_NON_CP = AMDGPU_VEGA20_DOORBELL_sDMA_ENGINE0,
+ AMDGPU_VEGA20_DOORBELL64_LAST_NON_CP = AMDGPU_VEGA20_DOORBELL64_VCE_RING6_7,
+
+ /* kiq/kcq from second XCD. Max 8 XCDs */
+ AMDGPU_VEGA20_DOORBELL_XCC1_KIQ_START = 0x190,
+ /* 8 compute rings per GC. Max to 0x1CE */
+ AMDGPU_VEGA20_DOORBELL_XCC1_MEC_RING0_START = 0x197,
+
+ /* AID1 SDMA: 0x1D0 ~ 0x1F7 */
+ AMDGPU_VEGA20_DOORBELL_AID1_sDMA_START = 0x1D0,
+
+ AMDGPU_VEGA20_DOORBELL_MAX_ASSIGNMENT = 0x1F7,
+ AMDGPU_VEGA20_DOORBELL_INVALID = 0xFFFF
+};
+
+enum AMDGPU_NAVI10_DOORBELL_ASSIGNMENT {
+
+ /* Compute + GFX: 0~255 */
+ AMDGPU_NAVI10_DOORBELL_KIQ = 0x000,
+ AMDGPU_NAVI10_DOORBELL_HIQ = 0x001,
+ AMDGPU_NAVI10_DOORBELL_DIQ = 0x002,
+ AMDGPU_NAVI10_DOORBELL_MEC_RING0 = 0x003,
+ AMDGPU_NAVI10_DOORBELL_MEC_RING1 = 0x004,
+ AMDGPU_NAVI10_DOORBELL_MEC_RING2 = 0x005,
+ AMDGPU_NAVI10_DOORBELL_MEC_RING3 = 0x006,
+ AMDGPU_NAVI10_DOORBELL_MEC_RING4 = 0x007,
+ AMDGPU_NAVI10_DOORBELL_MEC_RING5 = 0x008,
+ AMDGPU_NAVI10_DOORBELL_MEC_RING6 = 0x009,
+ AMDGPU_NAVI10_DOORBELL_MEC_RING7 = 0x00A,
+ AMDGPU_NAVI10_DOORBELL_MES_RING0 = 0x00B,
+ AMDGPU_NAVI10_DOORBELL_MES_RING1 = 0x00C,
+ AMDGPU_NAVI10_DOORBELL_USERQUEUE_START = 0x00D,
+ AMDGPU_NAVI10_DOORBELL_USERQUEUE_END = 0x08A,
+ AMDGPU_NAVI10_DOORBELL_GFX_RING0 = 0x08B,
+ AMDGPU_NAVI10_DOORBELL_GFX_RING1 = 0x08C,
+ AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_START = 0x08D,
+ AMDGPU_NAVI10_DOORBELL_GFX_USERQUEUE_END = 0x0FF,
+
+ /* SDMA:256~335*/
+ AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0 = 0x100,
+ AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE1 = 0x10A,
+ AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE2 = 0x114,
+ AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE3 = 0x11E,
+ /* IH: 376~391 */
+ AMDGPU_NAVI10_DOORBELL_IH = 0x178,
+ /* MMSCH: 392~407
+	 * overlap the doorbell assignment with VCN as they are mutually exclusive.
+	 * The VCN engine's doorbell is 32-bit and two VCN rings share one QWORD.
+ */
+	AMDGPU_NAVI10_DOORBELL64_VCN0_1 = 0x188, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+ AMDGPU_NAVI10_DOORBELL64_VCN2_3 = 0x189,
+ AMDGPU_NAVI10_DOORBELL64_VCN4_5 = 0x18A,
+ AMDGPU_NAVI10_DOORBELL64_VCN6_7 = 0x18B,
+
+ AMDGPU_NAVI10_DOORBELL64_VCN8_9 = 0x18C,
+ AMDGPU_NAVI10_DOORBELL64_VCNa_b = 0x18D,
+ AMDGPU_NAVI10_DOORBELL64_VCNc_d = 0x18E,
+ AMDGPU_NAVI10_DOORBELL64_VCNe_f = 0x18F,
+
+ AMDGPU_NAVI10_DOORBELL64_VPE = 0x190,
+
+ AMDGPU_NAVI10_DOORBELL64_FIRST_NON_CP = AMDGPU_NAVI10_DOORBELL_sDMA_ENGINE0,
+ AMDGPU_NAVI10_DOORBELL64_LAST_NON_CP = AMDGPU_NAVI10_DOORBELL64_VPE,
+
+ AMDGPU_NAVI10_DOORBELL_MAX_ASSIGNMENT = AMDGPU_NAVI10_DOORBELL64_VPE,
+ AMDGPU_NAVI10_DOORBELL_INVALID = 0xFFFF
+};
+
+/*
+ * 64-bit doorbells; offsets are in QWORDs and occupy 2KB of doorbell space
+ */
+enum AMDGPU_DOORBELL64_ASSIGNMENT {
+ /*
+	 * All compute-related doorbells (kiq, hiq, diq, traditional compute queues, user queues) should be located
+	 * in a contiguous range so that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this range.
+	 * Compute-related doorbells are allocated from 0x00 to 0x8a.
+ */
+
+
+ /* kernel scheduling */
+ AMDGPU_DOORBELL64_KIQ = 0x00,
+
+ /* HSA interface queue and debug queue */
+ AMDGPU_DOORBELL64_HIQ = 0x01,
+ AMDGPU_DOORBELL64_DIQ = 0x02,
+
+ /* Compute engines */
+ AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
+ AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
+ AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
+ AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
+ AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
+ AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
+ AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
+ AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,
+
+ /* User queue doorbell range (128 doorbells) */
+ AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
+ AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,
+
+ /* Graphics engine */
+ AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,
+
+ /*
+ * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
+ * Graphics voltage island aperture 1
+ * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
+ */
+
+	/* For vega10 SR-IOV, the SDMA doorbells must be fixed as follows
+	 * to keep the same settings as the host driver, or conflicts
+	 * will occur
+ */
+ AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
+ AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2,
+ AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,
+
+ /* Interrupt handler */
+ AMDGPU_DOORBELL64_IH = 0xF4, /* For legacy interrupt ring buffer */
+ AMDGPU_DOORBELL64_IH_RING1 = 0xF5, /* For page migration request log */
+ AMDGPU_DOORBELL64_IH_RING2 = 0xF6, /* For page migration translation/invalidation log */
+
+ /* VCN engine use 32 bits doorbell */
+	AMDGPU_DOORBELL64_VCN0_1 = 0xF8, /* lower 32 bits for VCN0 and upper 32 bits for VCN1 */
+ AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
+ AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
+ AMDGPU_DOORBELL64_VCN6_7 = 0xFB,
+
+	/* overlap the doorbell assignment with VCN as they are mutually exclusive.
+	 * The VCE engine's doorbell is 32-bit and two VCE rings share one QWORD
+ */
+ AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
+ AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
+ AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
+ AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,
+
+ AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
+ AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
+ AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
+ AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,
+
+ AMDGPU_DOORBELL64_FIRST_NON_CP = AMDGPU_DOORBELL64_sDMA_ENGINE0,
+ AMDGPU_DOORBELL64_LAST_NON_CP = AMDGPU_DOORBELL64_VCE_RING6_7,
+
+ AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
+ AMDGPU_DOORBELL64_INVALID = 0xFFFF
+};
+
+enum AMDGPU_DOORBELL_ASSIGNMENT_LAYOUT1 {
+
+	/* XCC0: 0x00 ~ 0x20, XCC1: 0x20 ~ 0x2F ... */
+
+ /* KIQ/HIQ/DIQ */
+ AMDGPU_DOORBELL_LAYOUT1_KIQ_START = 0x000,
+ AMDGPU_DOORBELL_LAYOUT1_HIQ = 0x001,
+ AMDGPU_DOORBELL_LAYOUT1_DIQ = 0x002,
+ /* Compute: 0x08 ~ 0x20 */
+ AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START = 0x008,
+ AMDGPU_DOORBELL_LAYOUT1_MEC_RING_END = 0x00F,
+ AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START = 0x010,
+ AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END = 0x01F,
+ AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE = 0x020,
+
+ /* SDMA: 0x100 ~ 0x19F */
+ AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START = 0x100,
+ AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_END = 0x19F,
+ /* IH: 0x1A0 ~ 0x1AF */
+ AMDGPU_DOORBELL_LAYOUT1_IH = 0x1A0,
+ /* VCN: 0x1B0 ~ 0x1E8 */
+ AMDGPU_DOORBELL_LAYOUT1_VCN_START = 0x1B0,
+ AMDGPU_DOORBELL_LAYOUT1_VCN_END = 0x1E8,
+
+ AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START,
+ AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP = AMDGPU_DOORBELL_LAYOUT1_VCN_END,
+
+ AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT = 0x1E8,
+ AMDGPU_DOORBELL_LAYOUT1_INVALID = 0xFFFF
+};
+
+u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
+u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
+void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
+
+/*
+ * GPU doorbell aperture helper functions.
+ */
+int amdgpu_doorbell_init(struct amdgpu_device *adev);
+void amdgpu_doorbell_fini(struct amdgpu_device *adev);
+int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev);
+uint32_t amdgpu_doorbell_index_on_bar(struct amdgpu_device *adev,
+ struct amdgpu_bo *db_bo,
+ uint32_t doorbell_index,
+ uint32_t db_size);
+
+#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
+#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
+#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
+#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
+
+#endif
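
Editor's note: the RDOORBELL*/WDOORBELL* macros expect a local adev variable to be in scope. A hedged sketch of how a ring backend might publish its write pointer through the 64-bit variant; the ring fields referenced are assumed to have been initialized elsewhere:

static void example_ring_commit(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	/* publish the new write pointer by ringing the ring's doorbell */
	if (ring->use_doorbell)
		WDOORBELL64(ring->doorbell_index, ring->wptr);
}
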
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
new file mode 100644
index 000000000000..3040437d99c2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+
+/**
+ * amdgpu_mm_rdoorbell - read a doorbell dword
+ *
+ * @adev: amdgpu_device pointer
+ * @index: doorbell index
+ *
+ * Returns the value in the doorbell aperture at the
+ * requested doorbell index (CIK).
+ */
+u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
+{
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
+ if (index < adev->doorbell.num_kernel_doorbells)
+ return readl(adev->doorbell.cpu_addr + index);
+
+ dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n",
+ index);
+ return 0;
+}
+
+/**
+ * amdgpu_mm_wdoorbell - write a doorbell dword
+ *
+ * @adev: amdgpu_device pointer
+ * @index: doorbell index
+ * @v: value to write
+ *
+ * Writes @v to the doorbell aperture at the
+ * requested doorbell index (CIK).
+ */
+void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
+{
+ if (amdgpu_device_skip_hw_access(adev))
+ return;
+
+ if (index < adev->doorbell.num_kernel_doorbells)
+ writel(v, adev->doorbell.cpu_addr + index);
+ else
+ dev_err(adev->dev,
+ "writing beyond doorbell aperture: 0x%08x!\n", index);
+}
+
+/**
+ * amdgpu_mm_rdoorbell64 - read a doorbell Qword
+ *
+ * @adev: amdgpu_device pointer
+ * @index: doorbell index
+ *
+ * Returns the value in the doorbell aperture at the
+ * requested doorbell index (VEGA10+).
+ */
+u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
+{
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
+ if (index < adev->doorbell.num_kernel_doorbells)
+ return atomic64_read((atomic64_t *)(adev->doorbell.cpu_addr + index));
+
+ dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n",
+ index);
+ return 0;
+}
+
+/**
+ * amdgpu_mm_wdoorbell64 - write a doorbell Qword
+ *
+ * @adev: amdgpu_device pointer
+ * @index: doorbell index
+ * @v: value to write
+ *
+ * Writes @v to the doorbell aperture at the
+ * requested doorbell index (VEGA10+).
+ */
+void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
+{
+ if (amdgpu_device_skip_hw_access(adev))
+ return;
+
+ if (index < adev->doorbell.num_kernel_doorbells)
+ atomic64_set((atomic64_t *)(adev->doorbell.cpu_addr + index), v);
+ else
+ dev_err(adev->dev,
+ "writing beyond doorbell aperture: 0x%08x!\n", index);
+}
+
+/**
+ * amdgpu_doorbell_index_on_bar - Find doorbell's absolute offset in BAR
+ *
+ * @adev: amdgpu_device pointer
+ * @db_bo: doorbell object's bo
+ * @doorbell_index: doorbell relative index in this doorbell object
+ * @db_size: doorbell size in bytes
+ *
+ * Returns the doorbell's absolute index in the BAR.
+ */
+uint32_t amdgpu_doorbell_index_on_bar(struct amdgpu_device *adev,
+ struct amdgpu_bo *db_bo,
+ uint32_t doorbell_index,
+ uint32_t db_size)
+{
+ int db_bo_offset;
+
+ db_bo_offset = amdgpu_bo_gpu_offset_no_check(db_bo);
+
+	/* Doorbell indices are in dwords, but a doorbell can be 32-bit
+	 * or 64-bit, so scale the index by db_size (in bytes) / 4.
+ */
+ return db_bo_offset / sizeof(u32) + doorbell_index *
+ DIV_ROUND_UP(db_size, 4);
+}
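
Editor's note, as a worked example of that conversion with hypothetical values: for a BO offset of 0x800 bytes, a relative doorbell_index of 3 and 64-bit (8-byte) doorbells, the result is 0x800 / 4 + 3 * DIV_ROUND_UP(8, 4) = 512 + 6 = 518, i.e. absolute dword index 518 in the BAR.
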
+
+/**
+ * amdgpu_doorbell_create_kernel_doorbells - Create kernel doorbells for graphics
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Creates the kernel doorbells used by the graphics driver.
+ * Returns 0 on success, error otherwise.
+ */
+int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev)
+{
+ int r;
+ int size;
+
+ /* SI HW does not have doorbells, skip allocation */
+ if (adev->doorbell.num_kernel_doorbells == 0)
+ return 0;
+
+ /* Reserve first num_kernel_doorbells (page-aligned) for kernel ops */
+ size = ALIGN(adev->doorbell.num_kernel_doorbells * sizeof(u32), PAGE_SIZE);
+
+ /* Allocate an extra page for MES kernel usages (ring test) */
+ adev->mes.db_start_dw_offset = size / sizeof(u32);
+ size += PAGE_SIZE;
+
+ r = amdgpu_bo_create_kernel(adev,
+ size,
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_DOORBELL,
+ &adev->doorbell.kernel_doorbells,
+ NULL,
+ (void **)&adev->doorbell.cpu_addr);
+ if (r) {
+ dev_err(adev->dev,
+ "Failed to allocate kernel doorbells, err=%d\n", r);
+ return r;
+ }
+
+ adev->doorbell.num_kernel_doorbells = size / sizeof(u32);
+ return 0;
+}
+
+/*
+ * GPU doorbell aperture helper functions.
+ */
+/**
+ * amdgpu_doorbell_init - Init doorbell driver information.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Init doorbell driver information (CIK)
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_doorbell_init(struct amdgpu_device *adev)
+{
+
+ /* No doorbell on SI hardware generation */
+ if (adev->asic_type < CHIP_BONAIRE) {
+ adev->doorbell.base = 0;
+ adev->doorbell.size = 0;
+ adev->doorbell.num_kernel_doorbells = 0;
+ return 0;
+ }
+
+ if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
+ return -EINVAL;
+
+ amdgpu_asic_init_doorbell_index(adev);
+
+ /* doorbell bar mapping */
+ adev->doorbell.base = pci_resource_start(adev->pdev, 2);
+ adev->doorbell.size = pci_resource_len(adev->pdev, 2);
+
+ adev->doorbell.num_kernel_doorbells =
+ min_t(u32, adev->doorbell.size / sizeof(u32),
+ adev->doorbell_index.max_assignment + 1);
+ if (adev->doorbell.num_kernel_doorbells == 0)
+ return -EINVAL;
+
+ /*
+ * For Vega, reserve and map two pages on the doorbell BAR since the
+ * SDMA paging queue doorbell uses the second page. The
+ * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
+ * doorbells are in the first page, so with the paging queue enabled,
+ * num_kernel_doorbells needs one extra page (0x400 dwords).
+ */
+ if (adev->asic_type >= CHIP_VEGA10)
+ adev->doorbell.num_kernel_doorbells += 0x400;
+
+ return 0;
+}
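To make the doorbell count computation above concrete with hypothetical numbers (a 2 MiB doorbell BAR and max_assignment = 0x3ff); again an illustration only, not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned int bar_size = 2 * 1024 * 1024;  /* hypothetical doorbell BAR length in bytes */
	unsigned int max_assignment = 0x3ff;      /* hypothetical highest kernel doorbell index */

	/* min_t(u32, size / sizeof(u32), max_assignment + 1) */
	unsigned int n = bar_size / 4 < max_assignment + 1 ?
			 bar_size / 4 : max_assignment + 1;

	/* Vega and newer reserve a second page for the SDMA paging queue */
	n += 0x400;

	printf("num_kernel_doorbells = 0x%x\n", n);  /* 0x400 + 0x400 = 0x800 */
	return 0;
}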
+
+/**
+ * amdgpu_doorbell_fini - Tear down doorbell driver information.
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Tear down doorbell driver information (CIK)
+ */
+void amdgpu_doorbell_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->doorbell.kernel_doorbells,
+ NULL,
+ (void **)&adev->doorbell.cpu_addr);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
deleted file mode 100644
index 8c96a4caa715..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ /dev/null
@@ -1,556 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __AMDGPU_DPM_H__
-#define __AMDGPU_DPM_H__
-
-enum amdgpu_int_thermal_type {
- THERMAL_TYPE_NONE,
- THERMAL_TYPE_EXTERNAL,
- THERMAL_TYPE_EXTERNAL_GPIO,
- THERMAL_TYPE_RV6XX,
- THERMAL_TYPE_RV770,
- THERMAL_TYPE_ADT7473_WITH_INTERNAL,
- THERMAL_TYPE_EVERGREEN,
- THERMAL_TYPE_SUMO,
- THERMAL_TYPE_NI,
- THERMAL_TYPE_SI,
- THERMAL_TYPE_EMC2103_WITH_INTERNAL,
- THERMAL_TYPE_CI,
- THERMAL_TYPE_KV,
-};
-
-enum amdgpu_dpm_auto_throttle_src {
- AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
- AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum amdgpu_dpm_event_src {
- AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
- AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
- AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
- AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
- AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
-#define SCLK_DEEP_SLEEP_MASK 0x8
-
-struct amdgpu_ps {
- u32 caps; /* vbios flags */
- u32 class; /* vbios flags */
- u32 class2; /* vbios flags */
- /* UVD clocks */
- u32 vclk;
- u32 dclk;
- /* VCE clocks */
- u32 evclk;
- u32 ecclk;
- bool vce_active;
- enum amd_vce_level vce_level;
- /* asic priv */
- void *ps_priv;
-};
-
-struct amdgpu_dpm_thermal {
- /* thermal interrupt work */
- struct work_struct work;
- /* low temperature threshold */
- int min_temp;
- /* high temperature threshold */
- int max_temp;
- /* was last interrupt low to high or high to low */
- bool high_to_low;
- /* interrupt source */
- struct amdgpu_irq_src irq;
-};
-
-enum amdgpu_clk_action
-{
- AMDGPU_SCLK_UP = 1,
- AMDGPU_SCLK_DOWN
-};
-
-struct amdgpu_blacklist_clocks
-{
- u32 sclk;
- u32 mclk;
- enum amdgpu_clk_action action;
-};
-
-struct amdgpu_clock_and_voltage_limits {
- u32 sclk;
- u32 mclk;
- u16 vddc;
- u16 vddci;
-};
-
-struct amdgpu_clock_array {
- u32 count;
- u32 *values;
-};
-
-struct amdgpu_clock_voltage_dependency_entry {
- u32 clk;
- u16 v;
-};
-
-struct amdgpu_clock_voltage_dependency_table {
- u32 count;
- struct amdgpu_clock_voltage_dependency_entry *entries;
-};
-
-union amdgpu_cac_leakage_entry {
- struct {
- u16 vddc;
- u32 leakage;
- };
- struct {
- u16 vddc1;
- u16 vddc2;
- u16 vddc3;
- };
-};
-
-struct amdgpu_cac_leakage_table {
- u32 count;
- union amdgpu_cac_leakage_entry *entries;
-};
-
-struct amdgpu_phase_shedding_limits_entry {
- u16 voltage;
- u32 sclk;
- u32 mclk;
-};
-
-struct amdgpu_phase_shedding_limits_table {
- u32 count;
- struct amdgpu_phase_shedding_limits_entry *entries;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_entry {
- u32 vclk;
- u32 dclk;
- u16 v;
-};
-
-struct amdgpu_uvd_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_entry {
- u32 ecclk;
- u32 evclk;
- u16 v;
-};
-
-struct amdgpu_vce_clock_voltage_dependency_table {
- u8 count;
- struct amdgpu_vce_clock_voltage_dependency_entry *entries;
-};
-
-struct amdgpu_ppm_table {
- u8 ppm_design;
- u16 cpu_core_number;
- u32 platform_tdp;
- u32 small_ac_platform_tdp;
- u32 platform_tdc;
- u32 small_ac_platform_tdc;
- u32 apu_tdp;
- u32 dgpu_tdp;
- u32 dgpu_ulv_power;
- u32 tj_max;
-};
-
-struct amdgpu_cac_tdp_table {
- u16 tdp;
- u16 configurable_tdp;
- u16 tdc;
- u16 battery_power_limit;
- u16 small_power_limit;
- u16 low_cac_leakage;
- u16 high_cac_leakage;
- u16 maximum_power_delivery_limit;
-};
-
-struct amdgpu_dpm_dynamic_state {
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
- struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
- struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
- struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
- struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
- struct amdgpu_clock_array valid_sclk_values;
- struct amdgpu_clock_array valid_mclk_values;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
- struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
- u32 mclk_sclk_ratio;
- u32 sclk_mclk_delta;
- u16 vddc_vddci_delta;
- u16 min_vddc_for_pcie_gen2;
- struct amdgpu_cac_leakage_table cac_leakage_table;
- struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
- struct amdgpu_ppm_table *ppm_table;
- struct amdgpu_cac_tdp_table *cac_tdp_table;
-};
-
-struct amdgpu_dpm_fan {
- u16 t_min;
- u16 t_med;
- u16 t_high;
- u16 pwm_min;
- u16 pwm_med;
- u16 pwm_high;
- u8 t_hyst;
- u32 cycle_delay;
- u16 t_max;
- u8 control_mode;
- u16 default_max_fan_pwm;
- u16 default_fan_output_sensitivity;
- u16 fan_output_sensitivity;
- bool ucode_fan_control;
-};
-
-enum amdgpu_pcie_gen {
- AMDGPU_PCIE_GEN1 = 0,
- AMDGPU_PCIE_GEN2 = 1,
- AMDGPU_PCIE_GEN3 = 2,
- AMDGPU_PCIE_GEN_INVALID = 0xffff
-};
-
-struct amdgpu_dpm_funcs {
- int (*get_temperature)(struct amdgpu_device *adev);
- int (*pre_set_power_state)(struct amdgpu_device *adev);
- int (*set_power_state)(struct amdgpu_device *adev);
- void (*post_set_power_state)(struct amdgpu_device *adev);
- void (*display_configuration_changed)(struct amdgpu_device *adev);
- u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
- u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
- void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
- void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
- int (*force_performance_level)(struct amdgpu_device *adev, enum amd_dpm_forced_level level);
- bool (*vblank_too_short)(struct amdgpu_device *adev);
- void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
- void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
- void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
- void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
- u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
- int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
- int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
- int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
- int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
- int (*get_sclk_od)(struct amdgpu_device *adev);
- int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
- int (*get_mclk_od)(struct amdgpu_device *adev);
- int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
- int (*check_state_equal)(struct amdgpu_device *adev,
- struct amdgpu_ps *cps,
- struct amdgpu_ps *rps,
- bool *equal);
- int (*read_sensor)(struct amdgpu_device *adev, int idx, void *value,
- int *size);
-
- struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
- int (*reset_power_profile_state)(struct amdgpu_device *adev,
- struct amd_pp_profile *request);
- int (*get_power_profile_state)(struct amdgpu_device *adev,
- struct amd_pp_profile *query);
- int (*set_power_profile_state)(struct amdgpu_device *adev,
- struct amd_pp_profile *request);
- int (*switch_power_profile)(struct amdgpu_device *adev,
- enum amd_pp_profile_type type);
-};
-
-#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
-#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
-#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
-#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
-#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
-#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
-#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
-
-#define amdgpu_dpm_read_sensor(adev, idx, value, size) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value), (size)) : \
- (adev)->pm.funcs->read_sensor((adev), (idx), (value), (size)))
-
-#define amdgpu_dpm_get_temperature(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_temperature((adev)))
-
-#define amdgpu_dpm_set_fan_control_mode(adev, m) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
- (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
-
-#define amdgpu_dpm_get_fan_control_mode(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_fan_control_mode((adev)))
-
-#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
-
-#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_speed_rpm((adev)->powerplay.pp_handle, (s)) : \
- -EINVAL)
-
-#define amdgpu_dpm_get_sclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_sclk((adev), (l)))
-
-#define amdgpu_dpm_get_mclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_mclk((adev), (l)))
-
-
-#define amdgpu_dpm_force_performance_level(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->force_performance_level((adev), (l)))
-
-#define amdgpu_dpm_powergate_uvd(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_uvd((adev), (g)))
-
-#define amdgpu_dpm_powergate_vce(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_vce((adev), (g)))
-
-#define amdgpu_dpm_get_current_power_state(adev) \
- (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_get_pp_num_states(adev, data) \
- (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
-
-#define amdgpu_dpm_get_pp_table(adev, table) \
- (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
-
-#define amdgpu_dpm_set_pp_table(adev, buf, size) \
- (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
-
-#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
- (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
-
-#define amdgpu_dpm_force_clock_level(adev, type, level) \
- (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
-
-#define amdgpu_dpm_get_sclk_od(adev) \
- (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
-
-#define amdgpu_dpm_set_sclk_od(adev, value) \
- (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
-
-#define amdgpu_dpm_get_mclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_mclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
- (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
-
-#define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal))
-
-#define amdgpu_dpm_get_vce_clock_state(adev, i) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
- (adev)->pm.funcs->get_vce_clock_state((adev), (i)))
-
-#define amdgpu_dpm_get_performance_level(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) : \
- (adev)->pm.dpm.forced_level)
-
-#define amdgpu_dpm_reset_power_profile_state(adev, request) \
- ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
- (adev)->powerplay.pp_handle, request))
-
-#define amdgpu_dpm_get_power_profile_state(adev, query) \
- ((adev)->powerplay.pp_funcs->get_power_profile_state(\
- (adev)->powerplay.pp_handle, query))
-
-#define amdgpu_dpm_set_power_profile_state(adev, request) \
- ((adev)->powerplay.pp_funcs->set_power_profile_state(\
- (adev)->powerplay.pp_handle, request))
-
-#define amdgpu_dpm_switch_power_profile(adev, type) \
- ((adev)->powerplay.pp_funcs->switch_power_profile(\
- (adev)->powerplay.pp_handle, type))
-
-struct amdgpu_dpm {
- struct amdgpu_ps *ps;
- /* number of valid power states */
- int num_ps;
- /* current power state that is active */
- struct amdgpu_ps *current_ps;
- /* requested power state */
- struct amdgpu_ps *requested_ps;
- /* boot up power state */
- struct amdgpu_ps *boot_ps;
- /* default uvd power state */
- struct amdgpu_ps *uvd_ps;
- /* vce requirements */
- u32 num_of_vce_states;
- struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
- enum amd_vce_level vce_level;
- enum amd_pm_state_type state;
- enum amd_pm_state_type user_state;
- enum amd_pm_state_type last_state;
- enum amd_pm_state_type last_user_state;
- u32 platform_caps;
- u32 voltage_response_time;
- u32 backbias_response_time;
- void *priv;
- u32 new_active_crtcs;
- int new_active_crtc_count;
- u32 current_active_crtcs;
- int current_active_crtc_count;
- struct amdgpu_dpm_dynamic_state dyn_state;
- struct amdgpu_dpm_fan fan;
- u32 tdp_limit;
- u32 near_tdp_limit;
- u32 near_tdp_limit_adjusted;
- u32 sq_ramping_threshold;
- u32 cac_leakage;
- u16 tdp_od_limit;
- u32 tdp_adjustment;
- u16 load_line_slope;
- bool power_control;
- bool ac_power;
- /* special states active */
- bool thermal_active;
- bool uvd_active;
- bool vce_active;
- /* thermal handling */
- struct amdgpu_dpm_thermal thermal;
- /* forced levels */
- enum amd_dpm_forced_level forced_level;
-};
-
-struct amdgpu_pm {
- struct mutex mutex;
- u32 current_sclk;
- u32 current_mclk;
- u32 default_sclk;
- u32 default_mclk;
- struct amdgpu_i2c_chan *i2c_bus;
- /* internal thermal controller on rv6xx+ */
- enum amdgpu_int_thermal_type int_thermal_type;
- struct device *int_hwmon_dev;
- /* fan control parameters */
- bool no_fan;
- u8 fan_pulses_per_revolution;
- u8 fan_min_rpm;
- u8 fan_max_rpm;
- /* dpm */
- bool dpm_enabled;
- bool sysfs_initialized;
- struct amdgpu_dpm dpm;
- const struct firmware *fw; /* SMC firmware */
- uint32_t fw_version;
- const struct amdgpu_dpm_funcs *funcs;
- uint32_t pcie_gen_mask;
- uint32_t pcie_mlw_mask;
- struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
-};
-
-#define R600_SSTU_DFLT 0
-#define R600_SST_DFLT 0x00C8
-
-/* XXX are these ok? */
-#define R600_TEMP_RANGE_MIN (90 * 1000)
-#define R600_TEMP_RANGE_MAX (120 * 1000)
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-
-enum amdgpu_td {
- AMDGPU_TD_AUTO,
- AMDGPU_TD_UP,
- AMDGPU_TD_DOWN,
-};
-
-enum amdgpu_display_watermark {
- AMDGPU_DISPLAY_WATERMARK_LOW = 0,
- AMDGPU_DISPLAY_WATERMARK_HIGH = 1,
-};
-
-enum amdgpu_display_gap
-{
- AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
- AMDGPU_PM_DISPLAY_GAP_VBLANK = 1,
- AMDGPU_PM_DISPLAY_GAP_WATERMARK = 2,
- AMDGPU_PM_DISPLAY_GAP_IGNORE = 3,
-};
-
-void amdgpu_dpm_print_class_info(u32 class, u32 class2);
-void amdgpu_dpm_print_cap_info(u32 caps);
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
- struct amdgpu_ps *rps);
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
-bool amdgpu_is_uvd_state(u32 class, u32 class2);
-void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
- u32 *p, u32 *u);
-int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th);
-
-bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
-
-int amdgpu_get_platform_caps(struct amdgpu_device *adev);
-
-int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
-void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
-
-void amdgpu_add_thermal_controller(struct amdgpu_device *adev);
-
-enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
- u32 sys_mask,
- enum amdgpu_pcie_gen asic_gen,
- enum amdgpu_pcie_gen default_gen);
-
-u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
- u16 asic_lanes,
- u16 default_lanes);
-u8 amdgpu_encode_pci_lane_width(u32 lanes);
-
-struct amd_vce_state*
-amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx);
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f2d705e6a75a..bff25ef3e2d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1,10 +1,3 @@
-/**
- * \file amdgpu_drv.c
- * AMD Amdgpu driver
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
/*
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -29,22 +22,38 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem.h>
-#include "amdgpu_drv.h"
-
+#include <drm/drm_managed.h>
#include <drm/drm_pciids.h>
-#include <linux/console.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include <linux/cc_platform.h>
+#include <linux/dynamic_debug.h>
#include <linux/module.h>
+#include <linux/mmu_notifier.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include <linux/vga_switcheroo.h>
-#include "drm_crtc_helper.h"
#include "amdgpu.h"
-#include "amdgpu_irq.h"
-
#include "amdgpu_amdkfd.h"
+#include "amdgpu_dma_buf.h"
+#include "amdgpu_drv.h"
+#include "amdgpu_fdinfo.h"
+#include "amdgpu_irq.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_sched.h"
+#include "amdgpu_xgmi.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_userq_fence.h"
+#include "../amdxcp/amdgpu_xcp_drv.h"
/*
* KMS wrapper.
@@ -65,176 +74,1772 @@
* - 3.13.0 - Add PRT support
* - 3.14.0 - Fix race in amdgpu_ctx_get_fence() and note new functionality
* - 3.15.0 - Export more gpu info for gfx9
+ * - 3.16.0 - Add reserved vmid support
+ * - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
+ * - 3.18.0 - Export gpu always on cu bitmap
+ * - 3.19.0 - Add support for UVD MJPEG decode
+ * - 3.20.0 - Add support for local BOs
+ * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
+ * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
+ * - 3.23.0 - Add query for VRAM lost counter
+ * - 3.24.0 - Add high priority compute support for gfx9
+ * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
+ * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
+ * - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
+ * - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
+ * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
+ * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
+ * - 3.31.0 - Add support for per-flip tiling attribute changes with DC
+ * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
+ * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
+ * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
+ * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
+ * - 3.36.0 - Allow reading more status registers on si/cik
+ * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
+ * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
+ * - 3.39.0 - DMABUF implicit sync does a full pipeline sync
+ * - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
+ * - 3.41.0 - Add video codec query
+ * - 3.42.0 - Add 16bpc fixed point display support
+ * - 3.43.0 - Add device hot plug/unplug support
+ * - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B
+ * - 3.45.0 - Add context ioctl stable pstate interface
+ * - 3.46.0 - To enable hot plug amdgpu tests in libdrm
+ * - 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags
+ * - 3.48.0 - Add IP discovery version info to HW INFO
+ * - 3.49.0 - Add gang submit into CS IOCTL
+ * - 3.50.0 - Update AMDGPU_INFO_DEV_INFO IOCTL for minimum engine and memory clock
+ * Update AMDGPU_INFO_SENSOR IOCTL for PEAK_PSTATE engine and memory clock
+ * - 3.51.0 - Return the PCIe gen and lanes from the INFO ioctl
+ * - 3.52.0 - Add AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD, add device_info fields:
+ * tcp_cache_size, num_sqc_per_wgp, sqc_data_cache_size, sqc_inst_cache_size,
+ * gl1c_cache_size, gl2c_cache_size, mall_size, enabled_rb_pipes_mask_hi
+ * - 3.53.0 - Support for GFX11 CP GFX shadowing
+ * - 3.54.0 - Add AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS support
+ * - 3.55.0 - Add AMDGPU_INFO_GPUVM_FAULT query
+ * - 3.56.0 - Update IB start address and size alignment for decode and encode
+ * - 3.57.0 - Compute tunneling on GFX10+
+ * - 3.58.0 - Add GFX12 DCC support
+ * - 3.59.0 - Cleared VRAM
+ * - 3.60.0 - Add AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE (Vulkan requirement)
+ * - 3.61.0 - Contains fix for RV/PCO compute queues
+ * - 3.62.0 - Add AMDGPU_IDS_FLAGS_MODE_PF, AMDGPU_IDS_FLAGS_MODE_VF & AMDGPU_IDS_FLAGS_MODE_PT
+ * - 3.63.0 - GFX12 display DCC supports 256B max compressed block size
+ * - 3.64.0 - Userq IP support query
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 15
+#define KMS_DRIVER_MINOR 64
#define KMS_DRIVER_PATCHLEVEL 0
-int amdgpu_vram_limit = 0;
+/*
+ * amdgpu.debug module options. All are disabled by default.
+ */
+enum AMDGPU_DEBUG_MASK {
+ AMDGPU_DEBUG_VM = BIT(0),
+ AMDGPU_DEBUG_LARGEBAR = BIT(1),
+ AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2),
+ AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
+ AMDGPU_DEBUG_ENABLE_RAS_ACA = BIT(4),
+ AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5),
+ AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6),
+ AMDGPU_DEBUG_SMU_POOL = BIT(7),
+ AMDGPU_DEBUG_VM_USERPTR = BIT(8),
+ AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9)
+};
+
+unsigned int amdgpu_vram_limit = UINT_MAX;
+int amdgpu_vis_vram_limit;
int amdgpu_gart_size = -1; /* auto */
+int amdgpu_gtt_size = -1; /* auto */
int amdgpu_moverate = -1; /* auto */
-int amdgpu_benchmarking = 0;
-int amdgpu_testing = 0;
int amdgpu_audio = -1;
-int amdgpu_disp_priority = 0;
-int amdgpu_hw_i2c = 0;
+int amdgpu_disp_priority;
+int amdgpu_hw_i2c;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
-int amdgpu_lockup_timeout = 0;
+char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
int amdgpu_runtime_pm = -1;
-unsigned amdgpu_ip_block_mask = 0xffffffff;
+uint amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
-int amdgpu_deep_color = 0;
+int amdgpu_deep_color;
int amdgpu_vm_size = -1;
+int amdgpu_vm_fragment_size = -1;
int amdgpu_vm_block_size = -1;
-int amdgpu_vm_fault_stop = 0;
-int amdgpu_vm_debug = 0;
-int amdgpu_vram_page_split = 1024;
-int amdgpu_exp_hw_support = 0;
+int amdgpu_vm_fault_stop;
+int amdgpu_vm_update_mode = -1;
+int amdgpu_exp_hw_support;
+int amdgpu_dc = -1;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
-int amdgpu_no_evict = 0;
-int amdgpu_direct_gma_size = 0;
-unsigned amdgpu_pcie_gen_cap = 0;
-unsigned amdgpu_pcie_lane_cap = 0;
-unsigned amdgpu_cg_mask = 0xffffffff;
-unsigned amdgpu_pg_mask = 0xffffffff;
-char *amdgpu_disable_cu = NULL;
-char *amdgpu_virtual_display = NULL;
-unsigned amdgpu_pp_feature_mask = 0xffffffff;
-int amdgpu_ngg = 0;
-int amdgpu_prim_buf_per_se = 0;
-int amdgpu_pos_buf_per_se = 0;
-int amdgpu_cntl_sb_buf_per_se = 0;
-int amdgpu_param_buf_per_se = 0;
+uint amdgpu_pcie_gen_cap;
+uint amdgpu_pcie_lane_cap;
+u64 amdgpu_cg_mask = 0xffffffffffffffff;
+uint amdgpu_pg_mask = 0xffffffff;
+uint amdgpu_sdma_phase_quantum = 32;
+char *amdgpu_disable_cu;
+char *amdgpu_virtual_display;
+int amdgpu_enforce_isolation = -1;
+int amdgpu_modeset = -1;
+
+/* Specifies the default granularity for SVM, used in buffer
+ * migration and restoration of backing memory when handling
+ * recoverable page faults.
+ *
+ * The value is log2 of the number of pages in the buffer; for a
+ * 2 MiB buffer (512 pages of 4 KiB) it works out to 9.
+ */
+uint amdgpu_svm_default_granularity = 9;
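The granularity is thus log2 of the buffer's page count; a quick worked check of the default, assuming 4 KiB base pages (illustration only, not part of the patch):

#include <math.h>
#include <stdio.h>

int main(void)
{
	double buffer_bytes = 2.0 * 1024 * 1024;  /* 2 MiB buffer */
	double page_bytes = 4096;                 /* 4 KiB base page size (assumed) */

	/* granularity = log2(number of pages in the buffer) */
	double granularity = log2(buffer_bytes / page_bytes);

	printf("svm_default_granularity = %.0f\n", granularity);  /* 512 pages -> 9 */
	return 0;
}

(Compile with -lm.)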
+/*
+ * OverDrive(bit 14) disabled by default
+ * GFX DCS(bit 19) disabled by default
+ */
+uint amdgpu_pp_feature_mask = 0xfff7bfff;
+uint amdgpu_force_long_training;
+int amdgpu_lbpw = -1;
+int amdgpu_compute_multipipe = -1;
+int amdgpu_gpu_recovery = -1; /* auto */
+int amdgpu_emu_mode;
+uint amdgpu_smu_memory_pool_size;
+int amdgpu_smu_pptable_id = -1;
+/*
+ * FBC (bit 0) disabled by default
+ * MULTI_MON_PP_MCLK_SWITCH (bit 1) enabled by default
+ * - With this, for multiple monitors in sync (e.g. with the same model),
+ * mclk switching will be allowed, and the mclk will not be forced to the
+ * highest. That helps save some idle power.
+ * DISABLE_FRACTIONAL_PWM (bit 2) disabled by default
+ * PSR (bit 3) disabled by default
+ * EDP NO POWER SEQUENCING (bit 4) disabled by default
+ */
+uint amdgpu_dc_feature_mask = 2;
+uint amdgpu_dc_debug_mask;
+uint amdgpu_dc_visual_confirm;
+int amdgpu_async_gfx_ring = 1;
+int amdgpu_mcbp = -1;
+int amdgpu_discovery = -1;
+int amdgpu_mes;
+int amdgpu_mes_log_enable = 0;
+int amdgpu_mes_kiq;
+int amdgpu_uni_mes = 1;
+int amdgpu_noretry = -1;
+int amdgpu_force_asic_type = -1;
+int amdgpu_tmz = -1; /* auto */
+uint amdgpu_freesync_vid_mode;
+int amdgpu_reset_method = -1; /* auto */
+int amdgpu_num_kcq = -1;
+int amdgpu_smartshift_bias;
+int amdgpu_use_xgmi_p2p = 1;
+int amdgpu_vcnfw_log;
+int amdgpu_sg_display = -1; /* auto */
+int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE;
+int amdgpu_umsch_mm;
+int amdgpu_seamless = -1; /* auto */
+uint amdgpu_debug_mask;
+int amdgpu_agp = -1; /* auto */
+int amdgpu_wbrf = -1;
+int amdgpu_damage_clips = -1; /* auto */
+int amdgpu_umsch_mm_fwlog;
+int amdgpu_rebar = -1; /* auto */
+int amdgpu_user_queue = -1;
+
+DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
+ "DRM_UT_CORE",
+ "DRM_UT_DRIVER",
+ "DRM_UT_KMS",
+ "DRM_UT_PRIME",
+ "DRM_UT_ATOMIC",
+ "DRM_UT_VBL",
+ "DRM_UT_STATE",
+ "DRM_UT_LEASE",
+ "DRM_UT_DP",
+ "DRM_UT_DRMRES");
+
+struct amdgpu_mgpu_info mgpu_info = {
+ .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
+};
+int amdgpu_ras_enable = -1;
+uint amdgpu_ras_mask = 0xffffffff;
+int amdgpu_bad_page_threshold = -1;
+struct amdgpu_watchdog_timer amdgpu_watchdog_timer = {
+ .timeout_fatal_disable = false,
+ .period = 0x0, /* default to 0x0 (timeout disable) */
+};
+
+/**
+ * DOC: vramlimit (int)
+ * Restrict the total amount of VRAM in MiB for testing. The default is 0 (Use full VRAM).
+ */
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
-MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
-module_param_named(gartsize, amdgpu_gart_size, int, 0600);
+/**
+ * DOC: vis_vramlimit (int)
+ * Restrict the amount of CPU visible VRAM in MiB for testing. The default is 0 (Use full CPU visible VRAM).
+ */
+MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
+module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);
-MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
-module_param_named(moverate, amdgpu_moverate, int, 0600);
+/**
+ * DOC: gartsize (uint)
+ * Restrict the size of GART (for kernel use) in MiB (32, 64, etc.) for testing.
+ * The default is -1 (The size depends on asic).
+ */
+MODULE_PARM_DESC(gartsize, "Size of kernel GART to setup in megabytes (32, 64, etc., -1=auto)");
+module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
-MODULE_PARM_DESC(benchmark, "Run benchmark");
-module_param_named(benchmark, amdgpu_benchmarking, int, 0444);
+/**
+ * DOC: gttsize (int)
+ * Restrict the size of GTT domain (for userspace use) in MiB for testing.
+ * The default is -1 (Use value specified by TTM).
+ * This parameter is deprecated and will be removed in the future.
+ */
+MODULE_PARM_DESC(gttsize, "Size of the GTT userspace domain in megabytes (-1 = auto)");
+module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
-MODULE_PARM_DESC(test, "Run tests");
-module_param_named(test, amdgpu_testing, int, 0444);
+/**
+ * DOC: moverate (int)
+ * Set maximum buffer migration rate in MB/s. The default is -1 (8 MB/s).
+ */
+MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
+module_param_named(moverate, amdgpu_moverate, int, 0600);
+/**
+ * DOC: audio (int)
+ * Set HDMI/DP audio. Only affects non-DC display handling. The default is -1 (enabled); set 0 to disable it.
+ */
MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(audio, amdgpu_audio, int, 0444);
+/**
+ * DOC: disp_priority (int)
+ * Set display Priority (1 = normal, 2 = high). Only affects non-DC display handling. The default is 0 (auto).
+ */
MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
module_param_named(disp_priority, amdgpu_disp_priority, int, 0444);
+/**
+ * DOC: hw_i2c (int)
+ * To enable hw i2c engine. Only affects non-DC display handling. The default is 0 (Disabled).
+ */
MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444);
+/**
+ * DOC: pcie_gen2 (int)
+ * To disable PCIE Gen2/3 mode (0 = disable, 1 = enable). The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
+/**
+ * DOC: msi (int)
+ * To disable Message Signaled Interrupts (MSI) functionality (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);
-MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default 0 = disable)");
-module_param_named(lockup_timeout, amdgpu_lockup_timeout, int, 0444);
+/**
+ * DOC: svm_default_granularity (uint)
+ * Used in buffer migration and handling of recoverable page faults
+ */
+MODULE_PARM_DESC(svm_default_granularity, "SVM's default granularity in log(2^Pages), default 9 = 2^9 = 2 MiB");
+module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint, 0644);
+/**
+ * DOC: lockup_timeout (string)
+ * Set GPU scheduler timeout value in ms.
+ *
+ * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is, one or
+ * multiple values can be specified. Zero and negative values are invalid and will be
+ * adjusted to the default timeout.
+ *
+ * - With one value specified, the setting will apply to all non-compute jobs.
+ * - With multiple values specified, the first one will be for GFX.
+ * The second one is for Compute. The third and fourth ones are
+ * for SDMA and Video.
+ *
+ * By default (with no lockup_timeout setting), the timeout for all jobs is 10000 ms.
+ */
+MODULE_PARM_DESC(lockup_timeout,
+ "GPU lockup timeout in ms (default: 10000 for all jobs. "
+ "0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
+ "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
+module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
+
+/**
+ * DOC: dpm (int)
+ * Override for dynamic power management setting
+ * (0 = disable, 1 = enable)
+ * The default is -1 (auto).
+ */
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(dpm, amdgpu_dpm, int, 0444);
-MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = direct, 1 = SMU, 2 = PSP, -1 = auto)");
+/**
+ * DOC: fw_load_type (int)
+ * Set different firmware loading type for debugging, if supported.
+ * Set to 0 to force direct loading if supported by the ASIC. Set
+ * to -1 to select the default loading mode for the ASIC, as defined
+ * by the driver. The default is -1 (auto).
+ */
+MODULE_PARM_DESC(fw_load_type, "firmware loading type (3 = rlc backdoor autoload if supported, 2 = smu load if supported, 1 = psp load, 0 = force direct if supported, -1 = auto)");
module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444);
+/**
+ * DOC: aspm (int)
+ * To disable ASPM (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(aspm, amdgpu_aspm, int, 0444);
-MODULE_PARM_DESC(runpm, "PX runtime pm (1 = force enable, 0 = disable, -1 = PX only default)");
+/**
+ * DOC: runpm (int)
+ * Override for runtime power management control for dGPUs. The amdgpu driver can dynamically power down
+ * the dGPUs when they are idle if supported. The default is -1 (auto enable).
+ * Setting the value to 0 disables this functionality.
+ * Setting the value to -2 auto-enables runtime pm with power down even when displays are attached.
+ */
+MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto, -2 = auto with displays)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
+/**
+ * DOC: ip_block_mask (uint)
+ * Override what IP blocks are enabled on the GPU. Each GPU is a collection of IP blocks (gfx, display, video, etc.).
+ * Use this parameter to disable specific blocks. Note that the IP blocks do not have a fixed index. Some asics may not have
+ * some IPs or may include multiple instances of an IP, so the ordering varies from asic to asic. See the driver output in
+ * the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
+ */
MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
-module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
+module_param_named_unsafe(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
+/**
+ * DOC: bapm (int)
+ * Bidirectional Application Power Management (BAPM) is used to dynamically share TDP between CPU and GPU. Set value 0 to disable it.
+ * The default is -1 (auto, enabled).
+ */
MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(bapm, amdgpu_bapm, int, 0444);
+/**
+ * DOC: deep_color (int)
+ * Set 1 to enable Deep Color support. Only affects non-DC display handling. The default is 0 (disabled).
+ */
MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
module_param_named(deep_color, amdgpu_deep_color, int, 0444);
+/**
+ * DOC: vm_size (int)
+ * Override the size of the GPU's per client virtual address space in GiB. The default is -1 (automatic for each asic).
+ */
MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
module_param_named(vm_size, amdgpu_vm_size, int, 0444);
+/**
+ * DOC: vm_fragment_size (int)
+ * Override VM fragment size in bits (4, 5, etc. 4 = 64K, 9 = 2M). The default is -1 (automatic for each asic).
+ */
+MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc. 4 = 64K (default), Max 9 = 2M)");
+module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444);
+
+/**
+ * DOC: vm_block_size (int)
+ * Override VM page table size in bits (default depending on vm_size and hw setup). The default is -1 (automatic for each asic).
+ */
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
+/**
+ * DOC: vm_fault_stop (int)
+ * Stop on VM fault for debugging (0 = never, 1 = print first, 2 = always). The default is 0 (No stop).
+ */
MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
-MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
-module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
-
-MODULE_PARM_DESC(vram_page_split, "Number of pages after we split VRAM allocations (default 1024, -1 = disable)");
-module_param_named(vram_page_split, amdgpu_vram_page_split, int, 0444);
+/**
+ * DOC: vm_update_mode (int)
+ * Override VM update mode. VM updated by using CPU (0 = never, 1 = Graphics only, 2 = Compute only, 3 = Both). The default
+ * is -1 (only in large BAR (LB) systems are Compute VM tables updated by the CPU; otherwise 0, never).
+ */
+MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both");
+module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
+/**
+ * DOC: exp_hw_support (int)
+ * Enable experimental hw support (1 = enable). The default is 0 (disabled).
+ */
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
-module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+module_param_named_unsafe(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
+/**
+ * DOC: dc (int)
+ * Disable/Enable Display Core driver for debugging (1 = enable, 0 = disable). The default is -1 (automatic for each asic).
+ */
+MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
+module_param_named(dc, amdgpu_dc, int, 0444);
+
+/**
+ * DOC: sched_jobs (int)
+ * Override the max number of jobs supported in the sw queue. The default is 32.
+ */
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
+/**
+ * DOC: sched_hw_submission (int)
+ * Override the max number of HW submissions. The default is 2.
+ */
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
+/**
+ * DOC: ppfeaturemask (hexint)
+ * Override power features enabled. See enum PP_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * The default is the current set of stable power features.
+ */
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
-module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, int, 0444);
+module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, hexint, 0444);
-MODULE_PARM_DESC(no_evict, "Support pinning request from user space (1 = enable, 0 = disable (default))");
-module_param_named(no_evict, amdgpu_no_evict, int, 0444);
-
-MODULE_PARM_DESC(direct_gma_size, "Direct GMA size in megabytes (max 96MB)");
-module_param_named(direct_gma_size, amdgpu_direct_gma_size, int, 0444);
+/**
+ * DOC: forcelongtraining (uint)
+ * Force long memory training in resume.
+ * The default is zero, which indicates short training in resume.
+ */
+MODULE_PARM_DESC(forcelongtraining, "force memory long training");
+module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444);
+/**
+ * DOC: pcie_gen_cap (uint)
+ * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
+ * The default is 0 (automatic for each asic).
+ */
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
+/**
+ * DOC: pcie_lane_cap (uint)
+ * Override PCIE lanes capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
+ * The default is 0 (automatic for each asic).
+ */
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
+/**
+ * DOC: cg_mask (ullong)
+ * Override Clockgating features enabled on GPU (0 = disable clock gating). See the AMD_CG_SUPPORT flags in
+ * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffffffffffff (all enabled).
+ */
MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
-module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
+module_param_named(cg_mask, amdgpu_cg_mask, ullong, 0444);
+/**
+ * DOC: pg_mask (uint)
+ * Override Powergating features enabled on GPU (0 = disable power gating). See the AMD_PG_SUPPORT flags in
+ * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
+ */
MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
+/**
+ * DOC: sdma_phase_quantum (uint)
+ * Override SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change). The default is 32.
+ */
+MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))");
+module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444);
+
+/**
+ * DOC: disable_cu (charp)
+ * Set to disable CUs (It's set like se.sh.cu,...). The default is NULL.
+ */
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
+/**
+ * DOC: virtual_display (charp)
+ * Set to enable virtual display feature. This feature provides a virtual display hardware on headless boards
+ * or in virtualized environments. It will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x. It's the pci address of
+ * the device, plus the number of crtcs to expose. E.g., 0000:26:00.0,4 would enable 4 virtual crtcs on the pci
+ * device at 26:00.0. The default is NULL.
+ */
MODULE_PARM_DESC(virtual_display,
"Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
-MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))");
-module_param_named(ngg, amdgpu_ngg, int, 0444);
+/**
+ * DOC: lbpw (int)
+ * Override Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable). The default is -1 (auto, enabled).
+ */
+MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(lbpw, amdgpu_lbpw, int, 0444);
+
+MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
+module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
+
+/**
+ * DOC: gpu_recovery (int)
+ * Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
+ */
+MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
+module_param_named_unsafe(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
+
+/**
+ * DOC: emu_mode (int)
+ * Set value 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
+ */
+MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
+module_param_named_unsafe(emu_mode, amdgpu_emu_mode, int, 0444);
+
+/**
+ * DOC: ras_enable (int)
+ * Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))
+ */
+MODULE_PARM_DESC(ras_enable, "Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))");
+module_param_named(ras_enable, amdgpu_ras_enable, int, 0444);
+
+/**
+ * DOC: ras_mask (uint)
+ * Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1
+ * See the flags in drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+ */
+MODULE_PARM_DESC(ras_mask, "Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1");
+module_param_named(ras_mask, amdgpu_ras_mask, uint, 0444);
+
+/**
+ * DOC: timeout_fatal_disable (bool)
+ * Disable Watchdog timeout fatal error event
+ */
+MODULE_PARM_DESC(timeout_fatal_disable, "disable watchdog timeout fatal error (false = default)");
+module_param_named(timeout_fatal_disable, amdgpu_watchdog_timer.timeout_fatal_disable, bool, 0644);
+
+/**
+ * DOC: timeout_period (uint)
+ * Modify the watchdog timeout max_cycles as (1 << period)
+ */
+MODULE_PARM_DESC(timeout_period, "watchdog timeout period (0 = timeout disabled, 1 ~ 0x23 = timeout maxcycles = (1 << period))");
+module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
+
+/**
+ * DOC: si_support (int)
+ * Set SI support. This parameter only takes effect when CONFIG_DRM_AMDGPU_SI is set. For SI asics, when the radeon driver is enabled,
+ * set value 0 to use the radeon driver and value 1 to use the amdgpu driver. The default is to use the radeon driver when it is
+ * available, otherwise the amdgpu driver.
+ */
+#ifdef CONFIG_DRM_AMDGPU_SI
+
+#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
+int amdgpu_si_support;
+MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
+#else
+int amdgpu_si_support = 1;
+MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)");
+#endif
+
+module_param_named(si_support, amdgpu_si_support, int, 0444);
+#endif
+
+/**
+ * DOC: cik_support (int)
+ * Set CIK support. This parameter only takes effect when CONFIG_DRM_AMDGPU_CIK is set. For CIK asics, when the radeon driver is enabled,
+ * set value 0 to use the radeon driver and value 1 to use the amdgpu driver. The default is to use the radeon driver when it is
+ * available, otherwise the amdgpu driver.
+ */
+#ifdef CONFIG_DRM_AMDGPU_CIK
+
+#if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
+int amdgpu_cik_support;
+MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
+#else
+int amdgpu_cik_support = 1;
+MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)");
+#endif
+
+module_param_named(cik_support, amdgpu_cik_support, int, 0444);
+#endif
+
+/**
+ * DOC: smu_memory_pool_size (uint)
+ * Reserve GTT for SMU debug usage; set value 0 to disable it. The actual size is value * 256 MiB.
+ * E.g. 0x1 = 256 MiB, 0x2 = 512 MiB, 0x4 = 1 GiB, 0x8 = 2 GiB. The default is 0 (disabled).
+ */
+MODULE_PARM_DESC(smu_memory_pool_size,
+ "reserve gtt for smu debug usage, 0 = disable,0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
+module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);
+
+/**
+ * DOC: async_gfx_ring (int)
+ * It is used to enable gfx rings that can be configured with different or equal priorities
+ */
+MODULE_PARM_DESC(async_gfx_ring,
+ "Asynchronous GFX rings that could be configured with either different priorities (HP3D ring and LP3D ring), or equal priorities (0 = disabled, 1 = enabled (default))");
+module_param_named(async_gfx_ring, amdgpu_async_gfx_ring, int, 0444);
+
+/**
+ * DOC: mcbp (int)
+ * It is used to enable mid command buffer preemption (0 = disabled, 1 = enabled, -1 = auto (default)).
+ */
+MODULE_PARM_DESC(mcbp,
+ "Enable Mid-command buffer preemption (0 = disabled, 1 = enabled), -1 = auto (default)");
+module_param_named(mcbp, amdgpu_mcbp, int, 0444);
+
+/**
+ * DOC: discovery (int)
+ * Allow driver to discover hardware IP information from IP Discovery table at the top of VRAM.
+ * (-1 = auto (default), 0 = disabled, 1 = enabled, 2 = use ip_discovery table from file)
+ */
+MODULE_PARM_DESC(discovery,
+ "Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM");
+module_param_named(discovery, amdgpu_discovery, int, 0444);
+
+/**
+ * DOC: mes (int)
+ * Enable Micro Engine Scheduler. This is a new hw scheduling engine for gfx, sdma, and compute.
+ * (0 = disabled (default), 1 = enabled)
+ */
+MODULE_PARM_DESC(mes,
+ "Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
+module_param_named(mes, amdgpu_mes, int, 0444);
+
+/**
+ * DOC: mes_log_enable (int)
+ * Enable Micro Engine Scheduler log. This is used to enable/disable MES internal log.
+ * (0 = disabled (default), 1 = enabled)
+ */
+MODULE_PARM_DESC(mes_log_enable,
+ "Enable Micro Engine Scheduler log (0 = disabled (default), 1 = enabled)");
+module_param_named(mes_log_enable, amdgpu_mes_log_enable, int, 0444);
+
+/**
+ * DOC: mes_kiq (int)
+ * Enable Micro Engine Scheduler KIQ. This is a new engine pipe for kiq.
+ * (0 = disabled (default), 1 = enabled)
+ */
+MODULE_PARM_DESC(mes_kiq,
+ "Enable Micro Engine Scheduler KIQ (0 = disabled (default), 1 = enabled)");
+module_param_named(mes_kiq, amdgpu_mes_kiq, int, 0444);
+
+/**
+ * DOC: uni_mes (int)
+ * Enable Unified Micro Engine Scheduler. This is a new engine pipe for unified scheduler.
+ * (0 = disabled (default), 1 = enabled)
+ */
+MODULE_PARM_DESC(uni_mes,
+ "Enable Unified Micro Engine Scheduler (0 = disabled, 1 = enabled(default)");
+module_param_named(uni_mes, amdgpu_uni_mes, int, 0444);
+
+/**
+ * DOC: noretry (int)
+ * Disable XNACK retry in the SQ by default on GFXv9 hardware. On ASICs that
+ * do not support per-process XNACK this also disables retry page faults.
+ * (0 = retry enabled, 1 = retry disabled, -1 auto (default))
+ */
+MODULE_PARM_DESC(noretry,
+ "Disable retry faults (0 = retry enabled, 1 = retry disabled, -1 auto (default))");
+module_param_named(noretry, amdgpu_noretry, int, 0644);
+
+/**
+ * DOC: force_asic_type (int)
+ * A non negative value used to specify the asic type for all supported GPUs.
+ */
+MODULE_PARM_DESC(force_asic_type,
+ "A non negative value used to specify the asic type for all supported GPUs");
+module_param_named_unsafe(force_asic_type, amdgpu_force_asic_type, int, 0444);
+
+/**
+ * DOC: use_xgmi_p2p (int)
+ * Enables/disables XGMI P2P interface (0 = disable, 1 = enable).
+ */
+MODULE_PARM_DESC(use_xgmi_p2p,
+ "Enable XGMI P2P interface (0 = disable; 1 = enable (default))");
+module_param_named(use_xgmi_p2p, amdgpu_use_xgmi_p2p, int, 0444);
+
+
+#ifdef CONFIG_HSA_AMD
+/**
+ * DOC: sched_policy (int)
+ * Set scheduling policy. Default is HWS(hardware scheduling) with over-subscription.
+ * Setting 1 disables over-subscription. Setting 2 disables HWS and statically
+ * assigns queues to HQDs.
+ */
+int sched_policy = KFD_SCHED_POLICY_HWS;
+module_param_unsafe(sched_policy, int, 0444);
+MODULE_PARM_DESC(sched_policy,
+ "Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
+
+/**
+ * DOC: hws_max_conc_proc (int)
+ * Maximum number of processes that HWS can schedule concurrently. The maximum is the
+ * number of VMIDs assigned to the HWS, which is also the default.
+ */
+int hws_max_conc_proc = -1;
+module_param(hws_max_conc_proc, int, 0444);
+MODULE_PARM_DESC(hws_max_conc_proc,
+ "Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
+
+/**
+ * DOC: cwsr_enable (int)
+ * CWSR(compute wave store and resume) allows the GPU to preempt shader execution in
+ * the middle of a compute wave. Default is 1 to enable this feature. Setting 0
+ * disables it.
+ */
+int cwsr_enable = 1;
+module_param(cwsr_enable, int, 0444);
+MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
+
+/**
+ * DOC: max_num_of_queues_per_device (int)
+ * Maximum number of queues per device. Valid setting is between 1 and 4096. Default
+ * is 4096.
+ */
+int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+module_param(max_num_of_queues_per_device, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_device,
+ "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
+
+/**
+ * DOC: send_sigterm (int)
+ * Send sigterm to HSA process on unhandled exceptions. Default is not to send sigterm
+ * but just print errors on dmesg. Setting 1 enables sending sigterm.
+ */
+int send_sigterm;
+module_param(send_sigterm, int, 0444);
+MODULE_PARM_DESC(send_sigterm,
+ "Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");
+
+/**
+ * DOC: halt_if_hws_hang (int)
+ * Halt if HWS hang is detected. Default value, 0, disables the halt on hang.
+ * Setting 1 enables halt on hang.
+ */
+int halt_if_hws_hang;
+module_param_unsafe(halt_if_hws_hang, int, 0644);
+MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
+
+/**
+ * DOC: hws_gws_support(bool)
+ * Assume that HWS supports GWS barriers regardless of what firmware version
+ * check says. Default value: false (rely on MEC2 firmware version check).
+ */
+bool hws_gws_support;
+module_param_unsafe(hws_gws_support, bool, 0444);
+MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
+
+/**
+ * DOC: queue_preemption_timeout_ms (int)
+ * queue preemption timeout in ms (1 = Minimum, 9000 = default)
+ */
+int queue_preemption_timeout_ms = 9000;
+module_param(queue_preemption_timeout_ms, int, 0644);
+MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 = Minimum, 9000 = default)");
+
+/**
+ * DOC: debug_evictions(bool)
+ * Enable extra debug messages to help determine the cause of evictions
+ */
+bool debug_evictions;
+module_param(debug_evictions, bool, 0644);
+MODULE_PARM_DESC(debug_evictions, "enable eviction debug messages (false = default)");
+
+/**
+ * DOC: no_system_mem_limit (bool)
+ * Disable system memory limit, to support multiple process shared memory
+ */
+bool no_system_mem_limit;
+module_param(no_system_mem_limit, bool, 0644);
+MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = default)");
+
+/**
+ * DOC: no_queue_eviction_on_vm_fault (int)
+ * If set, process queues will not be evicted on gpuvm fault. This is to keep
+ * the wavefront context for debugging (0 = queue eviction, 1 = no queue
+ * eviction). The default is 0 (queue eviction).
+ */
+int amdgpu_no_queue_eviction_on_vm_fault;
+MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault (0 = queue eviction, 1 = no queue eviction)");
+module_param_named_unsafe(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
+#endif
+
+/**
+ * DOC: mtype_local (int)
+ * MTYPE to use for local (VRAM) memory (0 = MTYPE_RW (default),
+ * 1 = MTYPE_NC, 2 = MTYPE_CC).
+ */
+int amdgpu_mtype_local;
+MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_RW (default), 1 = MTYPE_NC, 2 = MTYPE_CC)");
+module_param_named_unsafe(mtype_local, amdgpu_mtype_local, int, 0444);
+
+/**
+ * DOC: pcie_p2p (bool)
+ * Enable PCIe P2P (requires large-BAR). Default value: true (on)
+ */
+#ifdef CONFIG_HSA_AMD_P2P
+bool pcie_p2p = true;
+module_param(pcie_p2p, bool, 0444);
+MODULE_PARM_DESC(pcie_p2p, "Enable PCIe P2P (requires large-BAR). (N = off, Y = on(default))");
+#endif
-MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)");
-module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444);
+/**
+ * DOC: dcfeaturemask (uint)
+ * Override which display features are enabled. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * The default is the current set of stable display features.
+ */
+MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default)");
+module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
-MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)");
-module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444);
+/**
+ * DOC: dcdebugmask (uint)
+ * Display debug options. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ */
+MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default)");
+module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
-MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)");
-module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444);
+MODULE_PARM_DESC(visualconfirm, "Visual confirm (0 = off (default), 1 = MPO, 5 = PSR)");
+module_param_named(visualconfirm, amdgpu_dc_visual_confirm, uint, 0444);
-MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Pramater Cache per Shader Engine (default depending on gfx)");
-module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444);
+/**
+ * DOC: abmlevel (uint)
+ * Override the default ABM (Adaptive Backlight Management) level used for DC
+ * enabled hardware. Requires DMCU to be supported and loaded.
+ * Valid levels are 0-4. A value of 0 indicates that ABM should be disabled by
+ * default. Values 1-4 control the maximum allowable brightness reduction via
+ * the ABM algorithm, with 1 being the least reduction and 4 being the most
+ * reduction.
+ *
+ * Defaults to -1, or auto. Userspace can only override this level after
+ * boot if it's set to auto.
+ */
+int amdgpu_dm_abm_level = -1;
+MODULE_PARM_DESC(abmlevel,
+ "ABM level (0 = off, 1-4 = backlight reduction level, -1 auto (default))");
+module_param_named(abmlevel, amdgpu_dm_abm_level, int, 0444);
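+/*
+ * Illustrative example (not part of this patch): booting with
+ * "amdgpu.abmlevel=4" requests the most aggressive backlight reduction,
+ * while the default (-1, auto) allows userspace to override the level
+ * after boot.
+ */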
+
+int amdgpu_backlight = -1;
+MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
+module_param_named(backlight, amdgpu_backlight, bint, 0444);
+
+/**
+ * DOC: damageclips (int)
+ * Enable or disable damage clips support. If damage clips support is disabled,
+ * we will force full frame updates, irrespective of what user space sends to
+ * us.
+ *
+ * Defaults to -1 (where it is enabled unless a PSR-SU display is detected).
+ */
+MODULE_PARM_DESC(damageclips,
+ "Damage clips support (0 = disable, 1 = enable, -1 auto (default))");
+module_param_named(damageclips, amdgpu_damage_clips, int, 0444);
+
+/**
+ * DOC: tmz (int)
+ * Trusted Memory Zone (TMZ) is a method to protect data being written
+ * to or read from memory.
+ *
+ * The default value: -1 (auto).
+ */
+MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)");
+module_param_named(tmz, amdgpu_tmz, int, 0444);
+
+/**
+ * DOC: freesync_video (uint)
+ * Enable the optimization to adjust front porch timing to achieve seamless
+ * mode change experience when setting a freesync supported mode for which full
+ * modeset is not needed.
+ *
+ * When users enable this feature, the Display Core adds a set of modes
+ * derived from the base FreeSync video mode into the corresponding
+ * connector's mode list, based on commonly used refresh rates and the VRR
+ * range of the connected display. From the userspace perspective, switching
+ * between different refresh rates at the same resolution then appears as a
+ * seamless mode change. Additionally, userspace applications such as video
+ * players can read this mode list and change the refresh rate based on the
+ * video frame rate. Finally, userspace can also derive an appropriate mode
+ * for a particular refresh rate based on the FreeSync mode and add it to the
+ * connector's mode list.
+ *
+ * Note: This is an experimental feature.
+ *
+ * The default value: 0 (off).
+ */
+MODULE_PARM_DESC(
+ freesync_video,
+ "Adds additional modes via VRR for refresh changes without a full modeset (0 = off (default), 1 = on)");
+module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
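+/*
+ * Illustrative example (not part of this patch): enabling the experimental
+ * optimization with "amdgpu.freesync_video=1" makes the derived FreeSync
+ * video modes described above show up in the connector's mode list.
+ */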
+
+/**
+ * DOC: reset_method (int)
+ * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
+ */
+MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
+module_param_named_unsafe(reset_method, amdgpu_reset_method, int, 0644);
+
+/**
+ * DOC: bad_page_threshold (int)
+ * The bad page threshold specifies the threshold value of faulty pages
+ * detected by RAS ECC. The GPU may enter a bad status when the total
+ * number of faulty pages detected by ECC exceeds this threshold.
+ */
+MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold (-1 = ignore threshold (default value), 0 = disable bad page retirement, -2 = threshold determined by a formula, 0 < threshold < max records, user-defined threshold)");
+module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
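+/*
+ * Illustrative example (not part of this patch): "amdgpu.bad_page_threshold=-2"
+ * selects the formula-based threshold, while a positive value such as
+ * "amdgpu.bad_page_threshold=100" sets an explicit user-defined limit of
+ * 100 faulty pages.
+ */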
+
+MODULE_PARM_DESC(num_kcq, "number of kernel compute queues to set up (values greater than 8 or less than 0 fall back to 8; only affects gfx 8 and newer)");
+module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
+
+/**
+ * DOC: vcnfw_log (int)
+ * Enable vcnfw log output for debugging; the default is disabled.
+ */
+MODULE_PARM_DESC(vcnfw_log, "Enable vcnfw log (0 = disable (default value), 1 = enable)");
+module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444);
+
+/**
+ * DOC: sg_display (int)
+ * Disable S/G (scatter/gather) display (i.e., display from system memory).
+ * This option is only relevant on APUs. Set this option to 0 to disable
+ * S/G display if you experience flickering or other issues under memory
+ * pressure and report the issue.
+ */
+MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)");
+module_param_named(sg_display, amdgpu_sg_display, int, 0444);
+
+/**
+ * DOC: umsch_mm (int)
+ * Enable Multi Media User Mode Scheduler. This is a HW scheduling engine for VCN and VPE.
+ * (0 = disabled (default), 1 = enabled)
+ */
+MODULE_PARM_DESC(umsch_mm,
+ "Enable Multi Media User Mode Scheduler (0 = disabled (default), 1 = enabled)");
+module_param_named(umsch_mm, amdgpu_umsch_mm, int, 0444);
+
+/**
+ * DOC: umsch_mm_fwlog (int)
+ * Enable umschfw log output for debugging; the default is disabled.
+ */
+MODULE_PARM_DESC(umsch_mm_fwlog, "Enable umschfw log (0 = disable (default value), 1 = enable)");
+module_param_named(umsch_mm_fwlog, amdgpu_umsch_mm_fwlog, int, 0444);
+
+/**
+ * DOC: smu_pptable_id (int)
+ * Used to override the pptable id. id = 0 uses the VBIOS pptable;
+ * id > 0 uses the soft pptable with the specified id.
+ */
+MODULE_PARM_DESC(smu_pptable_id,
+ "specify pptable id to be used (-1 = auto(default) value, 0 = use pptable from vbios, > 0 = soft pptable id)");
+module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444);
+
+/**
+ * DOC: user_partt_mode (int)
+ * Used to override the default compute partition mode (SPX).
+ */
+MODULE_PARM_DESC(
+ user_partt_mode,
+ "specify partition mode to be used (-2 = AMDGPU_AUTO_COMPUTE_PARTITION_MODE(default value) \
+ 0 = AMDGPU_SPX_PARTITION_MODE, \
+ 1 = AMDGPU_DPX_PARTITION_MODE, \
+ 2 = AMDGPU_TPX_PARTITION_MODE, \
+ 3 = AMDGPU_QPX_PARTITION_MODE, \
+ 4 = AMDGPU_CPX_PARTITION_MODE)");
+module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444);
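+/*
+ * Illustrative example (not part of this patch): on a part that supports
+ * compute partitioning, "amdgpu.user_partt_mode=4" requests CPX instead of
+ * the automatically chosen mode (-2).
+ */
+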
+/**
+ * DOC: enforce_isolation (int)
+ * Enforce process isolation between graphics and compute.
+ * (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader)
+ */
+module_param_named(enforce_isolation, amdgpu_enforce_isolation, int, 0444);
+MODULE_PARM_DESC(enforce_isolation,
+"enforce process isolation between graphics and compute. (-1 = auto, 0 = disable, 1 = enable, 2 = enable legacy mode, 3 = enable without cleaner shader)");
+
+/**
+ * DOC: modeset (int)
+ * Override nomodeset (1 = override, -1 = auto). The default is -1 (auto).
+ */
+MODULE_PARM_DESC(modeset, "Override nomodeset (1 = enable, -1 = auto)");
+module_param_named(modeset, amdgpu_modeset, int, 0444);
+
+/**
+ * DOC: seamless (int)
+ * Seamless boot will keep the image on the screen during the boot process.
+ */
+MODULE_PARM_DESC(seamless, "Seamless boot (-1 = auto (default), 0 = disable, 1 = enable)");
+module_param_named(seamless, amdgpu_seamless, int, 0444);
+
+/**
+ * DOC: debug_mask (uint)
+ * Debug options for amdgpu, work as a binary mask with the following options:
+ *
+ * - 0x1: Debug VM handling
+ * - 0x2: Enable simulating large-bar capability on non-large bar system. This
+ * limits the VRAM size reported to ROCm applications to the visible
+ * size, usually 256MB.
+ * - 0x4: Disable GPU soft recovery, always do a full reset
+ * - 0x8: Use VRAM for firmware loading
+ * - 0x10: Enable ACA based RAS logging
+ * - 0x20: Enable experimental resets
+ * - 0x40: Disable ring resets
+ * - 0x80: Use VRAM for SMU pool
+ */
+MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default");
+module_param_named_unsafe(debug_mask, amdgpu_debug_mask, uint, 0444);
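+/*
+ * Illustrative example (not part of this patch): the options combine as a
+ * bit mask, so e.g. "amdgpu.debug_mask=0x5" enables VM handling debug (0x1)
+ * together with disabling GPU soft recovery (0x4).
+ */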
+
+/**
+ * DOC: agp (int)
+ * Enable the AGP aperture. This provides an aperture in the GPU's internal
+ * address space for direct access to system memory. Note that these accesses
+ * are non-snooped, so they are only used for access to uncached memory.
+ */
+MODULE_PARM_DESC(agp, "AGP (-1 = auto (default), 0 = disable, 1 = enable)");
+module_param_named(agp, amdgpu_agp, int, 0444);
+
+/**
+ * DOC: wbrf (int)
+ * Enable Wifi RFI interference mitigation feature.
+ * Due to electrical and mechanical constraints, relatively high-powered harmonics
+ * of the (G-)DDR memory clocks may interfere with the local radio module frequency
+ * bands used by Wifi 6/6e/7. To mitigate the possible RFI interference, with this
+ * feature enabled, PMFW will use either “shadowed P-State” or “P-State” based
+ * on active list of frequencies in-use (to be avoided) as part of initial setting or
+ * P-state transition. However, there may be potential performance impact with this
+ * feature enabled.
+ * (0 = disabled, 1 = enabled, -1 = auto (default setting, will be enabled if supported))
+ */
+MODULE_PARM_DESC(wbrf,
+ "Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)");
+module_param_named(wbrf, amdgpu_wbrf, int, 0444);
+
+/**
+ * DOC: rebar (int)
+ * Allow BAR resizing. Disable this to prevent the driver from attempting
+ * to resize the BAR if the GPU supports it and there is available MMIO space.
+ * Note that this just prevents the driver from resizing the BAR. The BIOS
+ * may have already resized the BAR at boot time.
+ */
+MODULE_PARM_DESC(rebar, "Resizable BAR (-1 = auto (default), 0 = disable, 1 = enable)");
+module_param_named(rebar, amdgpu_rebar, int, 0444);
+
+/**
+ * DOC: user_queue (int)
+ * Enable user queues on systems that support user queues. Possible values:
+ *
+ * - -1 = auto (ASIC specific default)
+ * - 0 = user queues disabled
+ * - 1 = user queues enabled and kernel queues enabled (if supported)
+ * - 2 = user queues enabled and kernel queues disabled
+ */
+MODULE_PARM_DESC(user_queue, "Enable user queues (-1 = auto (default), 0 = disable, 1 = enable, 2 = enable UQs and disable KQs)");
+module_param_named(user_queue, amdgpu_user_queue, int, 0444);
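+/*
+ * Illustrative example (not part of this patch): "amdgpu.user_queue=2"
+ * enables user queues and disables kernel queues on ASICs that support
+ * them, per the value list above.
+ */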
+
+/* These devices are not supported by amdgpu.
+ * They are supported by the mach64, r128, radeon drivers
+ */
+static const u16 amdgpu_unsupported_pciidlist[] = {
+ /* mach64 */
+ 0x4354,
+ 0x4358,
+ 0x4554,
+ 0x4742,
+ 0x4744,
+ 0x4749,
+ 0x474C,
+ 0x474D,
+ 0x474E,
+ 0x474F,
+ 0x4750,
+ 0x4751,
+ 0x4752,
+ 0x4753,
+ 0x4754,
+ 0x4755,
+ 0x4756,
+ 0x4757,
+ 0x4758,
+ 0x4759,
+ 0x475A,
+ 0x4C42,
+ 0x4C44,
+ 0x4C47,
+ 0x4C49,
+ 0x4C4D,
+ 0x4C4E,
+ 0x4C50,
+ 0x4C51,
+ 0x4C52,
+ 0x4C53,
+ 0x5654,
+ 0x5655,
+ 0x5656,
+ /* r128 */
+ 0x4c45,
+ 0x4c46,
+ 0x4d46,
+ 0x4d4c,
+ 0x5041,
+ 0x5042,
+ 0x5043,
+ 0x5044,
+ 0x5045,
+ 0x5046,
+ 0x5047,
+ 0x5048,
+ 0x5049,
+ 0x504A,
+ 0x504B,
+ 0x504C,
+ 0x504D,
+ 0x504E,
+ 0x504F,
+ 0x5050,
+ 0x5051,
+ 0x5052,
+ 0x5053,
+ 0x5054,
+ 0x5055,
+ 0x5056,
+ 0x5057,
+ 0x5058,
+ 0x5245,
+ 0x5246,
+ 0x5247,
+ 0x524b,
+ 0x524c,
+ 0x534d,
+ 0x5446,
+ 0x544C,
+ 0x5452,
+ /* radeon */
+ 0x3150,
+ 0x3151,
+ 0x3152,
+ 0x3154,
+ 0x3155,
+ 0x3E50,
+ 0x3E54,
+ 0x4136,
+ 0x4137,
+ 0x4144,
+ 0x4145,
+ 0x4146,
+ 0x4147,
+ 0x4148,
+ 0x4149,
+ 0x414A,
+ 0x414B,
+ 0x4150,
+ 0x4151,
+ 0x4152,
+ 0x4153,
+ 0x4154,
+ 0x4155,
+ 0x4156,
+ 0x4237,
+ 0x4242,
+ 0x4336,
+ 0x4337,
+ 0x4437,
+ 0x4966,
+ 0x4967,
+ 0x4A48,
+ 0x4A49,
+ 0x4A4A,
+ 0x4A4B,
+ 0x4A4C,
+ 0x4A4D,
+ 0x4A4E,
+ 0x4A4F,
+ 0x4A50,
+ 0x4A54,
+ 0x4B48,
+ 0x4B49,
+ 0x4B4A,
+ 0x4B4B,
+ 0x4B4C,
+ 0x4C57,
+ 0x4C58,
+ 0x4C59,
+ 0x4C5A,
+ 0x4C64,
+ 0x4C66,
+ 0x4C67,
+ 0x4E44,
+ 0x4E45,
+ 0x4E46,
+ 0x4E47,
+ 0x4E48,
+ 0x4E49,
+ 0x4E4A,
+ 0x4E4B,
+ 0x4E50,
+ 0x4E51,
+ 0x4E52,
+ 0x4E53,
+ 0x4E54,
+ 0x4E56,
+ 0x5144,
+ 0x5145,
+ 0x5146,
+ 0x5147,
+ 0x5148,
+ 0x514C,
+ 0x514D,
+ 0x5157,
+ 0x5158,
+ 0x5159,
+ 0x515A,
+ 0x515E,
+ 0x5460,
+ 0x5462,
+ 0x5464,
+ 0x5548,
+ 0x5549,
+ 0x554A,
+ 0x554B,
+ 0x554C,
+ 0x554D,
+ 0x554E,
+ 0x554F,
+ 0x5550,
+ 0x5551,
+ 0x5552,
+ 0x5554,
+ 0x564A,
+ 0x564B,
+ 0x564F,
+ 0x5652,
+ 0x5653,
+ 0x5657,
+ 0x5834,
+ 0x5835,
+ 0x5954,
+ 0x5955,
+ 0x5974,
+ 0x5975,
+ 0x5960,
+ 0x5961,
+ 0x5962,
+ 0x5964,
+ 0x5965,
+ 0x5969,
+ 0x5a41,
+ 0x5a42,
+ 0x5a61,
+ 0x5a62,
+ 0x5b60,
+ 0x5b62,
+ 0x5b63,
+ 0x5b64,
+ 0x5b65,
+ 0x5c61,
+ 0x5c63,
+ 0x5d48,
+ 0x5d49,
+ 0x5d4a,
+ 0x5d4c,
+ 0x5d4d,
+ 0x5d4e,
+ 0x5d4f,
+ 0x5d50,
+ 0x5d52,
+ 0x5d57,
+ 0x5e48,
+ 0x5e4a,
+ 0x5e4b,
+ 0x5e4c,
+ 0x5e4d,
+ 0x5e4f,
+ 0x6700,
+ 0x6701,
+ 0x6702,
+ 0x6703,
+ 0x6704,
+ 0x6705,
+ 0x6706,
+ 0x6707,
+ 0x6708,
+ 0x6709,
+ 0x6718,
+ 0x6719,
+ 0x671c,
+ 0x671d,
+ 0x671f,
+ 0x6720,
+ 0x6721,
+ 0x6722,
+ 0x6723,
+ 0x6724,
+ 0x6725,
+ 0x6726,
+ 0x6727,
+ 0x6728,
+ 0x6729,
+ 0x6738,
+ 0x6739,
+ 0x673e,
+ 0x6740,
+ 0x6741,
+ 0x6742,
+ 0x6743,
+ 0x6744,
+ 0x6745,
+ 0x6746,
+ 0x6747,
+ 0x6748,
+ 0x6749,
+ 0x674A,
+ 0x6750,
+ 0x6751,
+ 0x6758,
+ 0x6759,
+ 0x675B,
+ 0x675D,
+ 0x675F,
+ 0x6760,
+ 0x6761,
+ 0x6762,
+ 0x6763,
+ 0x6764,
+ 0x6765,
+ 0x6766,
+ 0x6767,
+ 0x6768,
+ 0x6770,
+ 0x6771,
+ 0x6772,
+ 0x6778,
+ 0x6779,
+ 0x677B,
+ 0x6840,
+ 0x6841,
+ 0x6842,
+ 0x6843,
+ 0x6849,
+ 0x684C,
+ 0x6850,
+ 0x6858,
+ 0x6859,
+ 0x6880,
+ 0x6888,
+ 0x6889,
+ 0x688A,
+ 0x688C,
+ 0x688D,
+ 0x6898,
+ 0x6899,
+ 0x689b,
+ 0x689c,
+ 0x689d,
+ 0x689e,
+ 0x68a0,
+ 0x68a1,
+ 0x68a8,
+ 0x68a9,
+ 0x68b0,
+ 0x68b8,
+ 0x68b9,
+ 0x68ba,
+ 0x68be,
+ 0x68bf,
+ 0x68c0,
+ 0x68c1,
+ 0x68c7,
+ 0x68c8,
+ 0x68c9,
+ 0x68d8,
+ 0x68d9,
+ 0x68da,
+ 0x68de,
+ 0x68e0,
+ 0x68e1,
+ 0x68e4,
+ 0x68e5,
+ 0x68e8,
+ 0x68e9,
+ 0x68f1,
+ 0x68f2,
+ 0x68f8,
+ 0x68f9,
+ 0x68fa,
+ 0x68fe,
+ 0x7100,
+ 0x7101,
+ 0x7102,
+ 0x7103,
+ 0x7104,
+ 0x7105,
+ 0x7106,
+ 0x7108,
+ 0x7109,
+ 0x710A,
+ 0x710B,
+ 0x710C,
+ 0x710E,
+ 0x710F,
+ 0x7140,
+ 0x7141,
+ 0x7142,
+ 0x7143,
+ 0x7144,
+ 0x7145,
+ 0x7146,
+ 0x7147,
+ 0x7149,
+ 0x714A,
+ 0x714B,
+ 0x714C,
+ 0x714D,
+ 0x714E,
+ 0x714F,
+ 0x7151,
+ 0x7152,
+ 0x7153,
+ 0x715E,
+ 0x715F,
+ 0x7180,
+ 0x7181,
+ 0x7183,
+ 0x7186,
+ 0x7187,
+ 0x7188,
+ 0x718A,
+ 0x718B,
+ 0x718C,
+ 0x718D,
+ 0x718F,
+ 0x7193,
+ 0x7196,
+ 0x719B,
+ 0x719F,
+ 0x71C0,
+ 0x71C1,
+ 0x71C2,
+ 0x71C3,
+ 0x71C4,
+ 0x71C5,
+ 0x71C6,
+ 0x71C7,
+ 0x71CD,
+ 0x71CE,
+ 0x71D2,
+ 0x71D4,
+ 0x71D5,
+ 0x71D6,
+ 0x71DA,
+ 0x71DE,
+ 0x7200,
+ 0x7210,
+ 0x7211,
+ 0x7240,
+ 0x7243,
+ 0x7244,
+ 0x7245,
+ 0x7246,
+ 0x7247,
+ 0x7248,
+ 0x7249,
+ 0x724A,
+ 0x724B,
+ 0x724C,
+ 0x724D,
+ 0x724E,
+ 0x724F,
+ 0x7280,
+ 0x7281,
+ 0x7283,
+ 0x7284,
+ 0x7287,
+ 0x7288,
+ 0x7289,
+ 0x728B,
+ 0x728C,
+ 0x7290,
+ 0x7291,
+ 0x7293,
+ 0x7297,
+ 0x7834,
+ 0x7835,
+ 0x791e,
+ 0x791f,
+ 0x793f,
+ 0x7941,
+ 0x7942,
+ 0x796c,
+ 0x796d,
+ 0x796e,
+ 0x796f,
+ 0x9400,
+ 0x9401,
+ 0x9402,
+ 0x9403,
+ 0x9405,
+ 0x940A,
+ 0x940B,
+ 0x940F,
+ 0x94A0,
+ 0x94A1,
+ 0x94A3,
+ 0x94B1,
+ 0x94B3,
+ 0x94B4,
+ 0x94B5,
+ 0x94B9,
+ 0x9440,
+ 0x9441,
+ 0x9442,
+ 0x9443,
+ 0x9444,
+ 0x9446,
+ 0x944A,
+ 0x944B,
+ 0x944C,
+ 0x944E,
+ 0x9450,
+ 0x9452,
+ 0x9456,
+ 0x945A,
+ 0x945B,
+ 0x945E,
+ 0x9460,
+ 0x9462,
+ 0x946A,
+ 0x946B,
+ 0x947A,
+ 0x947B,
+ 0x9480,
+ 0x9487,
+ 0x9488,
+ 0x9489,
+ 0x948A,
+ 0x948F,
+ 0x9490,
+ 0x9491,
+ 0x9495,
+ 0x9498,
+ 0x949C,
+ 0x949E,
+ 0x949F,
+ 0x94C0,
+ 0x94C1,
+ 0x94C3,
+ 0x94C4,
+ 0x94C5,
+ 0x94C6,
+ 0x94C7,
+ 0x94C8,
+ 0x94C9,
+ 0x94CB,
+ 0x94CC,
+ 0x94CD,
+ 0x9500,
+ 0x9501,
+ 0x9504,
+ 0x9505,
+ 0x9506,
+ 0x9507,
+ 0x9508,
+ 0x9509,
+ 0x950F,
+ 0x9511,
+ 0x9515,
+ 0x9517,
+ 0x9519,
+ 0x9540,
+ 0x9541,
+ 0x9542,
+ 0x954E,
+ 0x954F,
+ 0x9552,
+ 0x9553,
+ 0x9555,
+ 0x9557,
+ 0x955f,
+ 0x9580,
+ 0x9581,
+ 0x9583,
+ 0x9586,
+ 0x9587,
+ 0x9588,
+ 0x9589,
+ 0x958A,
+ 0x958B,
+ 0x958C,
+ 0x958D,
+ 0x958E,
+ 0x958F,
+ 0x9590,
+ 0x9591,
+ 0x9593,
+ 0x9595,
+ 0x9596,
+ 0x9597,
+ 0x9598,
+ 0x9599,
+ 0x959B,
+ 0x95C0,
+ 0x95C2,
+ 0x95C4,
+ 0x95C5,
+ 0x95C6,
+ 0x95C7,
+ 0x95C9,
+ 0x95CC,
+ 0x95CD,
+ 0x95CE,
+ 0x95CF,
+ 0x9610,
+ 0x9611,
+ 0x9612,
+ 0x9613,
+ 0x9614,
+ 0x9615,
+ 0x9616,
+ 0x9640,
+ 0x9641,
+ 0x9642,
+ 0x9643,
+ 0x9644,
+ 0x9645,
+ 0x9647,
+ 0x9648,
+ 0x9649,
+ 0x964a,
+ 0x964b,
+ 0x964c,
+ 0x964e,
+ 0x964f,
+ 0x9710,
+ 0x9711,
+ 0x9712,
+ 0x9713,
+ 0x9714,
+ 0x9715,
+ 0x9802,
+ 0x9803,
+ 0x9804,
+ 0x9805,
+ 0x9806,
+ 0x9807,
+ 0x9808,
+ 0x9809,
+ 0x980A,
+ 0x9900,
+ 0x9901,
+ 0x9903,
+ 0x9904,
+ 0x9905,
+ 0x9906,
+ 0x9907,
+ 0x9908,
+ 0x9909,
+ 0x990A,
+ 0x990B,
+ 0x990C,
+ 0x990D,
+ 0x990E,
+ 0x990F,
+ 0x9910,
+ 0x9913,
+ 0x9917,
+ 0x9918,
+ 0x9919,
+ 0x9990,
+ 0x9991,
+ 0x9992,
+ 0x9993,
+ 0x9994,
+ 0x9995,
+ 0x9996,
+ 0x9997,
+ 0x9998,
+ 0x9999,
+ 0x999A,
+ 0x999B,
+ 0x999C,
+ 0x999D,
+ 0x99A0,
+ 0x99A2,
+ 0x99A4,
+ /* radeon secondary ids */
+ 0x3171,
+ 0x3e70,
+ 0x4164,
+ 0x4165,
+ 0x4166,
+ 0x4168,
+ 0x4170,
+ 0x4171,
+ 0x4172,
+ 0x4173,
+ 0x496e,
+ 0x4a69,
+ 0x4a6a,
+ 0x4a6b,
+ 0x4a70,
+ 0x4a74,
+ 0x4b69,
+ 0x4b6b,
+ 0x4b6c,
+ 0x4c6e,
+ 0x4e64,
+ 0x4e65,
+ 0x4e66,
+ 0x4e67,
+ 0x4e68,
+ 0x4e69,
+ 0x4e6a,
+ 0x4e71,
+ 0x4f73,
+ 0x5569,
+ 0x556b,
+ 0x556d,
+ 0x556f,
+ 0x5571,
+ 0x5854,
+ 0x5874,
+ 0x5940,
+ 0x5941,
+ 0x5b70,
+ 0x5b72,
+ 0x5b73,
+ 0x5b74,
+ 0x5b75,
+ 0x5d44,
+ 0x5d45,
+ 0x5d6d,
+ 0x5d6f,
+ 0x5d72,
+ 0x5d77,
+ 0x5e6b,
+ 0x5e6d,
+ 0x7120,
+ 0x7124,
+ 0x7129,
+ 0x712e,
+ 0x712f,
+ 0x7162,
+ 0x7163,
+ 0x7166,
+ 0x7167,
+ 0x7172,
+ 0x7173,
+ 0x71a0,
+ 0x71a1,
+ 0x71a3,
+ 0x71a7,
+ 0x71bb,
+ 0x71e0,
+ 0x71e1,
+ 0x71e2,
+ 0x71e6,
+ 0x71e7,
+ 0x71f2,
+ 0x7269,
+ 0x726b,
+ 0x726e,
+ 0x72a0,
+ 0x72a8,
+ 0x72b1,
+ 0x72b3,
+ 0x793f,
+};
+
static const struct pci_device_id pciidlist[] = {
-#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -307,8 +1912,6 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
{0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
{0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
@@ -391,7 +1994,6 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
-#endif
/* topaz */
{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
@@ -442,6 +2044,7 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
/* Polaris12 */
{0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
@@ -449,50 +2052,288 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ /* VEGAM */
+ {0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+ {0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
+ {0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
/* Vega 10 */
- {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
+ /* Vega 12 */
+ {0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+ {0x1002, 0x69A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+ {0x1002, 0x69A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+ {0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+ {0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
+ /* Vega 20 */
+ {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ {0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
+ /* Raven */
+ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
+ {0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
+ /* Arcturus */
+ {0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
+ {0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
+ {0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
+ {0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
+ /* Navi10 */
+ {0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ {0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ {0x1002, 0x7318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ {0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ {0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ {0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ {0x1002, 0x731E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ {0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
+ /* Navi14 */
+ {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+
+ /* Renoir */
+ {0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+ {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+ {0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+ {0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+
+ /* Navi12 */
+ {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
+ {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
+
+ /* Sienna_Cichlid */
+ {0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+ {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+
+ /* Yellow Carp */
+ {0x1002, 0x164D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
+ {0x1002, 0x1681, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
+
+ /* Navy_Flounder */
+ {0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+ {0x1002, 0x73DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+
+ /* DIMGREY_CAVEFISH */
+ {0x1002, 0x73E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73ED, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ {0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+
+ /* Aldebaran */
+ {0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
+ {0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
+ {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
+ {0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
+
+ /* CYAN_SKILLFISH */
+ {0x1002, 0x13DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13F9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+ {0x1002, 0x143F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
+
+ /* BEIGE_GOBY */
+ {0x1002, 0x7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+ {0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+ {0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+ {0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+ {0x1002, 0x7424, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+ {0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+
+ { PCI_DEVICE(0x1002, PCI_ANY_ID),
+ .class = PCI_CLASS_DISPLAY_VGA << 8,
+ .class_mask = 0xffffff,
+ .driver_data = CHIP_IP_DISCOVERY },
+
+ { PCI_DEVICE(0x1002, PCI_ANY_ID),
+ .class = PCI_CLASS_DISPLAY_OTHER << 8,
+ .class_mask = 0xffffff,
+ .driver_data = CHIP_IP_DISCOVERY },
+
+ { PCI_DEVICE(0x1002, PCI_ANY_ID),
+ .class = PCI_CLASS_ACCELERATOR_PROCESSING << 8,
+ .class_mask = 0xffffff,
+ .driver_data = CHIP_IP_DISCOVERY },
+
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
-static struct drm_driver kms_driver;
+static const struct amdgpu_asic_type_quirk asic_type_quirks[] = {
+ /* differentiate between P10 and P11 asics with the same DID */
+ {0x67FF, 0xE3, CHIP_POLARIS10},
+ {0x67FF, 0xE7, CHIP_POLARIS10},
+ {0x67FF, 0xF3, CHIP_POLARIS10},
+ {0x67FF, 0xF7, CHIP_POLARIS10},
+};
-static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev)
+static const struct drm_driver amdgpu_kms_driver;
+
+static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
{
- struct apertures_struct *ap;
- bool primary = false;
+ struct pci_dev *p = NULL;
+ int i;
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
+ /* 0 - GPU
+ * 1 - audio
+ * 2 - USB
+ * 3 - UCSI
+ */
+ for (i = 1; i < 4; i++) {
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+ adev->pdev->bus->number, i);
+ if (p) {
+ pm_runtime_get_sync(&p->dev);
+ pm_runtime_mark_last_busy(&p->dev);
+ pm_runtime_put_autosuspend(&p->dev);
+ pci_dev_put(p);
+ }
+ }
+}
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
+static void amdgpu_init_debug_options(struct amdgpu_device *adev)
+{
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_VM) {
+ pr_info("debug: VM handling debug enabled\n");
+ adev->debug_vm = true;
+ }
-#ifdef CONFIG_X86
- primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
- drm_fb_helper_remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
- kfree(ap);
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_LARGEBAR) {
+ pr_info("debug: enabled simulating large-bar capability on non-large bar system\n");
+ adev->debug_largebar = true;
+ }
- return 0;
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY) {
+ pr_info("debug: soft reset for GPU recovery disabled\n");
+ adev->debug_disable_soft_recovery = true;
+ }
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_USE_VRAM_FW_BUF) {
+ pr_info("debug: place fw in vram for frontdoor loading\n");
+ adev->debug_use_vram_fw_buf = true;
+ }
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_RAS_ACA) {
+ pr_info("debug: enable RAS ACA\n");
+ adev->debug_enable_ras_aca = true;
+ }
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_EXP_RESETS) {
+ pr_info("debug: enable experimental reset features\n");
+ adev->debug_exp_resets = true;
+ }
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_GPU_RING_RESET) {
+ pr_info("debug: ring reset disabled\n");
+ adev->debug_disable_gpu_ring_reset = true;
+ }
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_SMU_POOL) {
+ pr_info("debug: use vram for smu pool\n");
+ adev->pm.smu_debug_mask |= SMU_DEBUG_POOL_USE_VRAM;
+ }
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_VM_USERPTR) {
+ pr_info("debug: VM mode debug for userptr is enabled\n");
+ adev->debug_vm_userptr = true;
+ }
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_RAS_CE_LOG) {
+ pr_info("debug: disable kernel logs of correctable errors\n");
+ adev->debug_disable_ce_logs = true;
+ }
+}
+
+static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(asic_type_quirks); i++) {
+ if (pdev->device == asic_type_quirks[i].device &&
+ pdev->revision == asic_type_quirks[i].revision) {
+ flags &= ~AMD_ASIC_MASK;
+ flags |= asic_type_quirks[i].type;
+ break;
+ }
+ }
+
+ return flags;
}
static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ struct drm_device *ddev;
+ struct amdgpu_device *adev;
unsigned long flags = ent->driver_data;
- int ret;
+ int ret, retry = 0, i;
+ bool supports_atomic = false;
+
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA ||
+ (pdev->class >> 8) == PCI_CLASS_DISPLAY_OTHER) {
+ if (drm_firmware_drivers_only() && amdgpu_modeset == -1)
+ return -EINVAL;
+ }
+
+ /* skip devices which are owned by radeon */
+ for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) {
+ if (amdgpu_unsupported_pciidlist[i] == pdev->device)
+ return -ENODEV;
+ }
+
+ if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev))
+ amdgpu_aspm = 0;
+
+ if (amdgpu_virtual_display ||
+ amdgpu_device_asic_has_dc_support(pdev, flags & AMD_ASIC_MASK))
+ supports_atomic = true;
if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
DRM_INFO("This hardware requires experimental hardware support.\n"
@@ -500,123 +2341,529 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
- /*
- * Initialize amdkfd before starting radeon. If it was not loaded yet,
- * defer radeon probing
+ flags = amdgpu_fix_asic_type(pdev, flags);
+
+ /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping;
+ * however, SME requires an indirect IOMMU mapping because the encryption
+ * bit is beyond the DMA mask of the chip.
*/
- ret = amdgpu_amdkfd_init();
- if (ret == -EPROBE_DEFER)
- return ret;
+ if (cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
+ ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
+ dev_info(&pdev->dev,
+ "SME is not compatible with RAVEN\n");
+ return -ENOTSUPP;
+ }
+
+ switch (flags & AMD_ASIC_MASK) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+#ifdef CONFIG_DRM_AMDGPU_SI
+ if (!amdgpu_si_support) {
+ dev_info(&pdev->dev,
+ "SI support provided by radeon.\n");
+ dev_info(&pdev->dev,
+ "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
+ );
+ return -ENODEV;
+ }
+ break;
+#else
+ dev_info(&pdev->dev, "amdgpu is built without SI support.\n");
+ return -ENODEV;
+#endif
+ case CHIP_KAVERI:
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ if (!amdgpu_cik_support) {
+ dev_info(&pdev->dev,
+ "CIK support provided by radeon.\n");
+ dev_info(&pdev->dev,
+ "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
+ );
+ return -ENODEV;
+ }
+ break;
+#else
+ dev_info(&pdev->dev, "amdgpu is built without CIK support.\n");
+ return -ENODEV;
+#endif
+ default:
+ break;
+ }
+
+ adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
+ if (IS_ERR(adev))
+ return PTR_ERR(adev);
+
+ adev->dev = &pdev->dev;
+ adev->pdev = pdev;
+ ddev = adev_to_drm(adev);
- /* Get rid of things like offb */
- ret = amdgpu_kick_out_firmware_fb(pdev);
+ if (!supports_atomic)
+ ddev->driver_features &= ~DRIVER_ATOMIC;
+
+ ret = pci_enable_device(pdev);
if (ret)
return ret;
- return drm_get_pci_dev(pdev, ent, &kms_driver);
+ pci_set_drvdata(pdev, ddev);
+
+ amdgpu_init_debug_options(adev);
+
+ ret = amdgpu_driver_load_kms(adev, flags);
+ if (ret)
+ goto err_pci;
+
+retry_init:
+ ret = drm_dev_register(ddev, flags);
+ if (ret == -EAGAIN && ++retry <= 3) {
+ DRM_INFO("retry init %d\n", retry);
+ /* Don't request EX mode too frequently; it can look like an attack */
+ msleep(5000);
+ goto retry_init;
+ } else if (ret) {
+ goto err_pci;
+ }
+
+ ret = amdgpu_xcp_dev_register(adev, ent);
+ if (ret)
+ goto err_pci;
+
+ ret = amdgpu_amdkfd_drm_client_create(adev);
+ if (ret)
+ goto err_pci;
+
+ /*
+ * 1. don't init fbdev on hw without DCE
+ * 2. don't init fbdev if there are no connectors
+ */
+ if (adev->mode_info.mode_config_initialized &&
+ !list_empty(&adev_to_drm(adev)->mode_config.connector_list)) {
+ const struct drm_format_info *format;
+
+ /* select 8 bpp console on low vram cards */
+ if (adev->gmc.real_vram_size <= (32*1024*1024))
+ format = drm_format_info(DRM_FORMAT_C8);
+ else
+ format = NULL;
+
+ drm_client_setup(adev_to_drm(adev), format);
+ }
+
+ ret = amdgpu_debugfs_init(adev);
+ if (ret)
+ DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
+
+ if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
+ /* only need to skip on ATPX */
+ if (amdgpu_device_supports_px(adev))
+ dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
+ /* we want direct complete for BOCO */
+ if (amdgpu_device_supports_boco(adev))
+ dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE |
+ DPM_FLAG_SMART_SUSPEND |
+ DPM_FLAG_MAY_SKIP_RESUME);
+ pm_runtime_use_autosuspend(ddev->dev);
+ pm_runtime_set_autosuspend_delay(ddev->dev, 5000);
+
+ pm_runtime_allow(ddev->dev);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ pci_wake_from_d3(pdev, TRUE);
+
+ /*
+ * For runpm implemented via BACO, PMFW will handle the
+ * timing for BACO in and out:
+ * - put ASIC into BACO state only when both video and
+ * audio functions are in D3 state.
+ * - pull ASIC out of BACO state when either video or
+ * audio function is in D0 state.
+ * Also, at startup, PMFW assumes both functions are in
+ * D0 state.
+ *
+ * So if the snd driver was loaded prior to the amdgpu driver
+ * and the audio function was put into D3 state, there will
+ * be no PMFW-aware D-state transition (D0->D3) on runpm
+ * suspend. Thus BACO will not be correctly kicked in.
+ *
+ * Via amdgpu_get_secondary_funcs(), the audio dev is put
+ * into D0 state. Then there will be a PMFW-aware D-state
+ * transition(D0->D3) on runpm suspend.
+ */
+ if (amdgpu_device_supports_baco(adev) &&
+ !(adev->flags & AMD_IS_APU) &&
+ adev->asic_type >= CHIP_NAVI10)
+ amdgpu_get_secondary_funcs(adev);
+ }
+
+ return 0;
+
+err_pci:
+ pci_disable_device(pdev);
+ return ret;
}
static void
amdgpu_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
- drm_put_dev(dev);
+ amdgpu_ras_eeprom_check_and_recover(adev);
+ amdgpu_xcp_dev_unplug(adev);
+ amdgpu_gmc_prepare_nps_mode_change(adev);
+ drm_dev_unplug(dev);
+
+ if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
+ pm_runtime_get_sync(dev->dev);
+ pm_runtime_forbid(dev->dev);
+ }
+
+ amdgpu_driver_unload_kms(dev);
+
+ /*
+ * Flush any in flight DMA operations from device.
+ * Clear the Bus Master Enable bit and then wait on the PCIe Device
+ * StatusTransactions Pending bit.
+ */
+ pci_disable_device(pdev);
+ pci_wait_for_pending_transaction(pdev);
}
static void
amdgpu_pci_shutdown(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ if (amdgpu_ras_intr_triggered())
+ return;
+
+ /* the device may not have been resumed here; return immediately in this case */
+ if (adev->in_s4 && adev->in_suspend)
+ return;
/* if we are running in a VM, make sure the device
* torn down properly on reboot/shutdown.
* unfortunately we can't detect certain
* hypervisors so just do this all the time.
*/
- amdgpu_suspend(adev);
+ if (!amdgpu_passthrough(adev))
+ adev->mp1_state = PP_MP1_STATE_UNLOAD;
+ amdgpu_device_ip_suspend(adev);
+ adev->mp1_state = PP_MP1_STATE_NONE;
+}
+
+static int amdgpu_pmops_prepare(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ /* the device may not have been resumed here; return immediately in this case */
+ if (adev->in_s4 && adev->in_suspend)
+ return 0;
+
+ /* Return a positive number here so
+ * DPM_FLAG_SMART_SUSPEND works properly
+ */
+ if (amdgpu_device_supports_boco(adev) && pm_runtime_suspended(dev))
+ return 1;
+
+ /* if we will not support s3 or s2i for the device
+ * then skip suspend
+ */
+ if (!amdgpu_acpi_is_s0ix_active(adev) &&
+ !amdgpu_acpi_is_s3_active(adev))
+ return 1;
+
+ return amdgpu_device_prepare(drm_dev);
+}
+
+static void amdgpu_pmops_complete(struct device *dev)
+{
+ amdgpu_device_complete(dev_get_drvdata(dev));
}
static int amdgpu_pmops_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ if (amdgpu_acpi_is_s0ix_active(adev))
+ adev->in_s0ix = true;
+ else if (amdgpu_acpi_is_s3_active(adev))
+ adev->in_s3 = true;
+ if (!adev->in_s0ix && !adev->in_s3) {
+#if IS_ENABLED(CONFIG_SUSPEND)
+ /* don't allow going deep first time followed by s2idle the next time */
+ if (adev->last_suspend_state != PM_SUSPEND_ON &&
+ adev->last_suspend_state != pm_suspend_target_state) {
+ drm_err_once(drm_dev, "Unsupported suspend state %d\n",
+ pm_suspend_target_state);
+ return -EINVAL;
+ }
+#endif
+ return 0;
+ }
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_device_suspend(drm_dev, true, true);
+#if IS_ENABLED(CONFIG_SUSPEND)
+ /* cache the state last used for suspend */
+ adev->last_suspend_state = pm_suspend_target_state;
+#endif
+
+ return amdgpu_device_suspend(drm_dev, true);
+}
+
+static int amdgpu_pmops_suspend_noirq(struct device *dev)
+{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ if (amdgpu_acpi_should_gpu_reset(adev))
+ return amdgpu_asic_reset(adev);
+
+ return 0;
}
static int amdgpu_pmops_resume(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- /* GPU comes up enabled by the bios on resume */
- if (amdgpu_device_is_px(drm_dev)) {
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
+ if (!adev->in_s0ix && !adev->in_s3)
+ return 0;
+
+ /* Avoids registers access if device is physically gone */
+ if (!pci_device_is_present(adev->pdev))
+ adev->no_hw_access = true;
- return amdgpu_device_resume(drm_dev, true, true);
+ r = amdgpu_device_resume(drm_dev, true);
+ if (amdgpu_acpi_is_s0ix_active(adev))
+ adev->in_s0ix = false;
+ else
+ adev->in_s3 = false;
+ return r;
}
static int amdgpu_pmops_freeze(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int r;
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_device_suspend(drm_dev, false, true);
+ r = amdgpu_device_suspend(drm_dev, true);
+ if (r)
+ return r;
+
+ if (amdgpu_acpi_should_gpu_reset(adev))
+ return amdgpu_asic_reset(adev);
+ return 0;
}
static int amdgpu_pmops_thaw(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_device_resume(drm_dev, false, true);
+ /* do not resume device if it's normal hibernation */
+ if (!pm_hibernate_is_recovering() && !pm_hibernation_mode_is_suspend())
+ return 0;
+
+ return amdgpu_device_resume(drm_dev, true);
}
static int amdgpu_pmops_poweroff(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_device_suspend(drm_dev, true, true);
+ /* the device may not have been resumed here; return immediately in this case */
+ if (adev->in_s4 && adev->in_suspend)
+ return 0;
+
+ return amdgpu_device_suspend(drm_dev, true);
}
static int amdgpu_pmops_restore(struct device *dev)
{
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+
+ return amdgpu_device_resume(drm_dev, true);
+}
+
+static int amdgpu_runtime_idle_check_display(struct device *dev)
+{
struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ if (adev->mode_info.num_crtc) {
+ struct drm_connector *list_connector;
+ struct drm_connector_list_iter iter;
+ int ret = 0;
+
+ if (amdgpu_runtime_pm != -2) {
+ /* XXX: Return busy if any displays are connected to avoid
+ * possible display wakeups after runtime resume due to
+ * hotplug events in case any displays were connected while
+ * the GPU was in suspend. Remove this once that is fixed.
+ */
+ mutex_lock(&drm_dev->mode_config.mutex);
+ drm_connector_list_iter_begin(drm_dev, &iter);
+ drm_for_each_connector_iter(list_connector, &iter) {
+ if (list_connector->status == connector_status_connected) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ mutex_unlock(&drm_dev->mode_config.mutex);
+
+ if (ret)
+ return ret;
+ }
+ if (adev->dc_enabled) {
+ struct drm_crtc *crtc;
+
+ drm_for_each_crtc(crtc, drm_dev) {
+ drm_modeset_lock(&crtc->mutex, NULL);
+ if (crtc->state->active)
+ ret = -EBUSY;
+ drm_modeset_unlock(&crtc->mutex);
+ if (ret < 0)
+ break;
+ }
+ } else {
+ mutex_lock(&drm_dev->mode_config.mutex);
+ drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
+
+ drm_connector_list_iter_begin(drm_dev, &iter);
+ drm_for_each_connector_iter(list_connector, &iter) {
+ if (list_connector->dpms == DRM_MODE_DPMS_ON) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+
+ drm_connector_list_iter_end(&iter);
+
+ drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
+ mutex_unlock(&drm_dev->mode_config.mutex);
+ }
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int amdgpu_runtime_idle_check_userq(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- return amdgpu_device_resume(drm_dev, false, true);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+done:
+ mutex_unlock(&adev->userq_mutex);
+
+ return ret;
}
static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- int ret;
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int ret, i;
- if (!amdgpu_device_is_px(drm_dev)) {
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) {
pm_runtime_forbid(dev);
return -EBUSY;
}
- drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- drm_kms_helper_poll_disable(drm_dev);
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
+ ret = amdgpu_runtime_idle_check_display(dev);
+ if (ret)
+ return ret;
+ ret = amdgpu_runtime_idle_check_userq(dev);
+ if (ret)
+ return ret;
+
+ /* wait for all rings to drain before suspending */
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
- ret = amdgpu_device_suspend(drm_dev, false, false);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_ignore_hotplug(pdev);
- if (amdgpu_is_atpx_hybrid())
+ if (ring && ring->sched.ready) {
+ ret = amdgpu_fence_wait_empty(ring);
+ if (ret)
+ return -EBUSY;
+ }
+ }
+
+ adev->in_runpm = true;
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX)
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+
+ /*
+ * By setting mp1_state as PP_MP1_STATE_UNLOAD, MP1 will do some
+ * proper cleanups and put itself into a state ready for PNP. That
+ * can address some random resume failures observed on BOCO capable
+ * platforms.
+ * TODO: this may also be needed for PX capable platforms.
+ */
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO)
+ adev->mp1_state = PP_MP1_STATE_UNLOAD;
+
+ ret = amdgpu_device_prepare(drm_dev);
+ if (ret)
+ return ret;
+ ret = amdgpu_device_suspend(drm_dev, false);
+ if (ret) {
+ adev->in_runpm = false;
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO)
+ adev->mp1_state = PP_MP1_STATE_NONE;
+ return ret;
+ }
+
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO)
+ adev->mp1_state = PP_MP1_STATE_NONE;
+
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX) {
+ /* Only need to handle PCI state in the driver for ATPX
+ * PCI core handles it for _PR3.
+ */
+ amdgpu_device_cache_pci_state(pdev);
+ pci_disable_device(pdev);
+ pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3cold);
- else if (!amdgpu_has_atpx_dgpu_power_cntl())
- pci_set_power_state(pdev, PCI_D3hot);
- drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+ } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) {
+ /* nothing to do */
+ } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
+ amdgpu_device_baco_enter(adev);
+ }
+
+ dev_dbg(&pdev->dev, "asic/device is runtime suspended\n");
return 0;
}
@@ -625,51 +2872,87 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
int ret;
- if (!amdgpu_device_is_px(drm_dev))
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
return -EINVAL;
- drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
+ /* Avoids registers access if device is physically gone */
+ if (!pci_device_is_present(adev->pdev))
+ adev->no_hw_access = true;
+
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX) {
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
- if (amdgpu_is_atpx_hybrid() ||
- !amdgpu_has_atpx_dgpu_power_cntl())
+ /* Only need to handle PCI state in the driver for ATPX
+ * PCI core handles it for _PR3.
+ */
pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- ret = pci_enable_device(pdev);
- if (ret)
+ amdgpu_device_load_pci_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ pci_set_master(pdev);
+ } else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) {
+ /* Only need to handle PCI state in the driver for ATPX
+ * PCI core handles it for _PR3.
+ */
+ pci_set_master(pdev);
+ } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
+ amdgpu_device_baco_exit(adev);
+ }
+ ret = amdgpu_device_resume(drm_dev, false);
+ if (ret) {
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX)
+ pci_disable_device(pdev);
return ret;
- pci_set_master(pdev);
+ }
- ret = amdgpu_device_resume(drm_dev, false, false);
- drm_kms_helper_poll_enable(drm_dev);
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
- drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX)
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ adev->in_runpm = false;
return 0;
}
static int amdgpu_pmops_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct drm_crtc *crtc;
+ struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ int ret;
- if (!amdgpu_device_is_px(drm_dev)) {
+ if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) {
pm_runtime_forbid(dev);
return -EBUSY;
}
- list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
- if (crtc->enabled) {
- DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
- return -EBUSY;
- }
- }
+ ret = amdgpu_runtime_idle_check_display(dev);
+ if (ret)
+ goto done;
+ ret = amdgpu_runtime_idle_check_userq(dev);
+done:
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
- /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
- return 1;
+ return ret;
+}
+
+static int amdgpu_drm_release(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+ struct drm_device *dev = file_priv->minor->dev;
+ int idx;
+
+ if (fpriv && drm_dev_enter(dev, &idx)) {
+ fpriv->evf_mgr.fd_closing = true;
+ amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
+ amdgpu_userq_mgr_fini(&fpriv->userq_mgr);
+ drm_dev_exit(idx);
+ }
+
+ return drm_release(inode, filp);
}
long amdgpu_drm_ioctl(struct file *filp,
@@ -678,97 +2961,168 @@ long amdgpu_drm_ioctl(struct file *filp,
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev;
long ret;
+
dev = file_priv->minor->dev;
ret = pm_runtime_get_sync(dev->dev);
if (ret < 0)
- return ret;
+ goto out;
ret = drm_ioctl(filp, cmd, arg);
pm_runtime_mark_last_busy(dev->dev);
+out:
pm_runtime_put_autosuspend(dev->dev);
return ret;
}
static const struct dev_pm_ops amdgpu_pm_ops = {
- .suspend = amdgpu_pmops_suspend,
- .resume = amdgpu_pmops_resume,
- .freeze = amdgpu_pmops_freeze,
- .thaw = amdgpu_pmops_thaw,
- .poweroff = amdgpu_pmops_poweroff,
- .restore = amdgpu_pmops_restore,
+ .prepare = pm_sleep_ptr(amdgpu_pmops_prepare),
+ .complete = pm_sleep_ptr(amdgpu_pmops_complete),
+ .suspend = pm_sleep_ptr(amdgpu_pmops_suspend),
+ .suspend_noirq = pm_sleep_ptr(amdgpu_pmops_suspend_noirq),
+ .resume = pm_sleep_ptr(amdgpu_pmops_resume),
+ .freeze = pm_sleep_ptr(amdgpu_pmops_freeze),
+ .thaw = pm_sleep_ptr(amdgpu_pmops_thaw),
+ .poweroff = pm_sleep_ptr(amdgpu_pmops_poweroff),
+ .restore = pm_sleep_ptr(amdgpu_pmops_restore),
.runtime_suspend = amdgpu_pmops_runtime_suspend,
.runtime_resume = amdgpu_pmops_runtime_resume,
.runtime_idle = amdgpu_pmops_runtime_idle,
};
+static int amdgpu_flush(struct file *f, fl_owner_t id)
+{
+ struct drm_file *file_priv = f->private_data;
+ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+ long timeout = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
+
+ timeout = amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr, timeout);
+ timeout = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
+
+ return timeout >= 0 ? 0 : timeout;
+}
+
static const struct file_operations amdgpu_driver_kms_fops = {
.owner = THIS_MODULE,
.open = drm_open,
- .release = drm_release,
+ .flush = amdgpu_flush,
+ .release = amdgpu_drm_release,
.unlocked_ioctl = amdgpu_drm_ioctl,
- .mmap = amdgpu_mmap,
+ .mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = amdgpu_kms_compat_ioctl,
#endif
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = drm_show_fdinfo,
+#endif
+ .fop_flags = FOP_UNSIGNED_OFFSET,
+};
+
+int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
+{
+ struct drm_file *file;
+
+ if (!filp)
+ return -EINVAL;
+
+ if (filp->f_op != &amdgpu_driver_kms_fops)
+ return -EINVAL;
+
+ file = filp->private_data;
+ *fpriv = file->driver_priv;
+ return 0;
+}
+
+const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ /* KMS */
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ, amdgpu_userq_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_SIGNAL, amdgpu_userq_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_USERQ_WAIT, amdgpu_userq_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(AMDGPU_GEM_LIST_HANDLES, amdgpu_gem_list_handles_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};
-static struct drm_driver kms_driver = {
+static const struct drm_driver amdgpu_kms_driver = {
.driver_features =
- DRIVER_USE_AGP |
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
- DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
- .load = amdgpu_driver_load_kms,
+ DRIVER_ATOMIC |
+ DRIVER_GEM |
+ DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
+ DRIVER_SYNCOBJ_TIMELINE,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
- .lastclose = amdgpu_driver_lastclose_kms,
- .set_busid = drm_pci_set_busid,
- .unload = amdgpu_driver_unload_kms,
- .get_vblank_counter = amdgpu_get_vblank_counter_kms,
- .enable_vblank = amdgpu_enable_vblank_kms,
- .disable_vblank = amdgpu_disable_vblank_kms,
- .get_vblank_timestamp = amdgpu_get_vblank_timestamp_kms,
- .get_scanout_position = amdgpu_get_crtc_scanoutpos,
-#if defined(CONFIG_DEBUG_FS)
- .debugfs_init = amdgpu_debugfs_init,
+ .ioctls = amdgpu_ioctls_kms,
+ .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
+ .dumb_create = amdgpu_mode_dumb_create,
+ .dumb_map_offset = amdgpu_mode_dumb_mmap,
+ DRM_FBDEV_TTM_DRIVER_OPS,
+ .fops = &amdgpu_driver_kms_fops,
+ .release = &amdgpu_driver_release_kms,
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = amdgpu_show_fdinfo,
#endif
- .irq_preinstall = amdgpu_irq_preinstall,
- .irq_postinstall = amdgpu_irq_postinstall,
- .irq_uninstall = amdgpu_irq_uninstall,
- .irq_handler = amdgpu_irq_handler,
+
+ .gem_prime_import = amdgpu_gem_prime_import,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .major = KMS_DRIVER_MAJOR,
+ .minor = KMS_DRIVER_MINOR,
+ .patchlevel = KMS_DRIVER_PATCHLEVEL,
+};
+
+const struct drm_driver amdgpu_partition_driver = {
+ .driver_features =
+ DRIVER_GEM | DRIVER_RENDER | DRIVER_SYNCOBJ |
+ DRIVER_SYNCOBJ_TIMELINE,
+ .open = amdgpu_driver_open_kms,
+ .postclose = amdgpu_driver_postclose_kms,
.ioctls = amdgpu_ioctls_kms,
- .gem_free_object_unlocked = amdgpu_gem_object_free,
- .gem_open_object = amdgpu_gem_object_open,
- .gem_close_object = amdgpu_gem_object_close,
+ .num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
.dumb_create = amdgpu_mode_dumb_create,
.dumb_map_offset = amdgpu_mode_dumb_mmap,
- .dumb_destroy = drm_gem_dumb_destroy,
+ DRM_FBDEV_TTM_DRIVER_OPS,
.fops = &amdgpu_driver_kms_fops,
+ .release = &amdgpu_driver_release_kms,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = amdgpu_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_pin = amdgpu_gem_prime_pin,
- .gem_prime_unpin = amdgpu_gem_prime_unpin,
- .gem_prime_res_obj = amdgpu_gem_prime_res_obj,
- .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
- .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
- .gem_prime_vmap = amdgpu_gem_prime_vmap,
- .gem_prime_vunmap = amdgpu_gem_prime_vunmap,
+ .gem_prime_import = amdgpu_gem_prime_import,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
- .date = DRIVER_DATE,
.major = KMS_DRIVER_MAJOR,
.minor = KMS_DRIVER_MINOR,
.patchlevel = KMS_DRIVER_PATCHLEVEL,
};
-static struct drm_driver *driver;
-static struct pci_driver *pdriver;
+static struct pci_error_handlers amdgpu_pci_err_handler = {
+ .error_detected = amdgpu_pci_error_detected,
+ .mmio_enabled = amdgpu_pci_mmio_enabled,
+ .slot_reset = amdgpu_pci_slot_reset,
+ .resume = amdgpu_pci_resume,
+};
+
+static const struct attribute_group *amdgpu_sysfs_groups[] = {
+ &amdgpu_vram_mgr_attr_group,
+ &amdgpu_gtt_mgr_attr_group,
+ &amdgpu_flash_attr_group,
+ NULL,
+};
static struct pci_driver amdgpu_kms_pci_driver = {
.name = DRIVER_NAME,
@@ -776,11 +3130,11 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.probe = amdgpu_pci_probe,
.remove = amdgpu_pci_remove,
.shutdown = amdgpu_pci_shutdown,
- .driver.pm = &amdgpu_pm_ops,
+ .driver.pm = pm_ptr(&amdgpu_pm_ops),
+ .err_handler = &amdgpu_pci_err_handler,
+ .dev_groups = amdgpu_sysfs_groups,
};
-
-
static int __init amdgpu_init(void)
{
int r;
@@ -789,28 +3143,25 @@ static int __init amdgpu_init(void)
if (r)
goto error_sync;
- r = amdgpu_fence_slab_init();
+ r = amdgpu_userq_fence_slab_init();
if (r)
goto error_fence;
- r = amd_sched_fence_slab_init();
- if (r)
- goto error_sched;
-
- if (vgacon_text_force()) {
- DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
- return -EINVAL;
- }
DRM_INFO("amdgpu kernel modesetting enabled.\n");
- driver = &kms_driver;
- pdriver = &amdgpu_kms_pci_driver;
- driver->num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
- /* let modprobe override vga console setting */
- return drm_pci_init(driver, pdriver);
+ amdgpu_acpi_detect();
-error_sched:
- amdgpu_fence_slab_fini();
+ /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
+ amdgpu_amdkfd_init();
+
+ if (amdgpu_pp_feature_mask & PP_OVERDRIVE_MASK) {
+ add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+ pr_crit("Overdrive is enabled, please disable it before "
+ "reporting any bugs unrelated to overdrive.\n");
+ }
+
+ /* let modprobe override vga console setting */
+ return pci_register_driver(&amdgpu_kms_pci_driver);
error_fence:
amdgpu_sync_fini();
@@ -822,11 +3173,13 @@ error_sync:
static void __exit amdgpu_exit(void)
{
amdgpu_amdkfd_fini();
- drm_pci_exit(driver, pdriver);
+ pci_unregister_driver(&amdgpu_kms_pci_driver);
amdgpu_unregister_atpx_handler();
+ amdgpu_acpi_release();
amdgpu_sync_fini();
- amd_sched_fence_slab_fini();
- amdgpu_fence_slab_fini();
+ amdgpu_userq_fence_slab_fini();
+ mmu_notifier_synchronize();
+ amdgpu_xcp_drv_release();
}
module_init(amdgpu_init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
index e3a4f7048042..2d86cc6f7f4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.h
@@ -40,9 +40,13 @@
#define DRIVER_NAME "amdgpu"
#define DRIVER_DESC "AMD GPU"
-#define DRIVER_DATE "20150101"
+
+extern const struct drm_driver amdgpu_partition_driver;
long amdgpu_drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
+long amdgpu_kms_compat_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
new file mode 100644
index 000000000000..8cd69836dd99
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_eeprom.h"
+#include "amdgpu.h"
+
+/* AT24CM02 and M24M02-R have a 256-byte write page size.
+ */
+#define EEPROM_PAGE_BITS 8
+#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS)
+#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1)
+
+#define EEPROM_OFFSET_SIZE 2
+
+/* EEPROM memory addresses are 19 bits long, which can
+ * be partitioned into 3, 8, 8 bits, for a total of 19.
+ * The upper 3 bits are sent as part of the 7-bit
+ * "Device Type Identifier"--an I2C concept, which for EEPROM devices
+ * is hard-coded as 1010b, indicating that it is an EEPROM
+ * device--this is the wire format, followed by the upper
+ * 3 bits of the 19-bit address, followed by the direction,
+ * followed by two bytes holding the remaining 16 bits of
+ * the EEPROM memory address. The format on the wire for EEPROM
+ * devices is: 1010XYZD, A15:A8, A7:A0,
+ * where D is the direction bit, sequenced out by the hardware.
+ * Bits XYZ are memory address bits 18, 17 and 16.
+ * These bits are compared to how pins 1-3 of the part are connected,
+ * depending on the size of the part, more on that later.
+ *
+ * Note that of this wire format, a client is in control
+ * of, and needs to specify only XYZ, A15:A8, A7:0, bits,
+ * which is exactly the EEPROM memory address, or offset,
+ * in order to address up to 8 EEPROM devices on the I2C bus.
+ *
+ * For instance, a 2-Mbit I2C EEPROM part addresses all its bytes
+ * using an 18-bit address, bits 17 to 0, and thus would use all but one bit
+ * of the 19 bits previously mentioned. The designer would then not connect
+ * pins 1 and 2, and pin 3, usually named "A_2" or "E2", would be connected to
+ * either Vcc or GND. This would allow for up to two 2-Mbit parts on
+ * the same bus, where one would be addressable with bit 18 as 1, and
+ * the other with bit 18 of the address as 0.
+ *
+ * For a 2-Mbit part, bit 18 is usually known as the "Chip Enable" or
+ * "Hardware Address Bit". This bit is compared to the load on pin 3
+ * of the device, described above, and if there is a match, then this
+ * device responds to the command. This way, you can connect two
+ * 2-Mbit EEPROM devices on the same bus, but see one contiguous
+ * memory space from 0 to 7FFFFh, where addresses 0 to 3FFFFh are in the
+ * device whose pin 3 is connected to GND, and addresses 40000h to 7FFFFh
+ * are in the 2nd device, whose pin 3 is connected to Vcc.
+ *
+ * You encode this addressing in the 32-bit "eeprom_addr" below,
+ * namely the 19 bits "XYZ,A15:A0", as a single 19-bit address. For
+ * instance, eeprom_addr = 0x6DA01 is 110_1101_1010_0000_0001, where
+ * XYZ=110b, and A15:A0=DA01h. The XYZ bits become part of the device
+ * address, and the rest of the address bits are sent as the memory
+ * address bytes.
+ *
+ * That is, for an I2C EEPROM driver everything is controlled by
+ * the "eeprom_addr".
+ *
+ * See also top of amdgpu_ras_eeprom.c.
+ *
+ * P.S. If you need to write, lock and read the Identification Page,
+ * (M24M02-DR device only, which we do not use), change the "7" to
+ * "0xF" in the macro below, and let the client set bit 20 to 1 in
+ * "eeprom_addr", and set A10 to 0 to write into it, and A10 and A1 to
+ * 1 to lock it permanently.
+ */
+#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF))
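As a worked example of the addressing described above (an editorial sketch, not part of this patch): the 19-bit eeprom_addr 0x6DA01 from the comment splits into the 7-bit I2C device address produced by MAKE_I2C_ADDR() and the two offset bytes that __amdgpu_eeprom_xfer() below places in msgs[0].buf. The standalone C program repeats the macro so it compiles on its own.

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as MAKE_I2C_ADDR() in the patch above. */
    #define SKETCH_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 0xF))

    int main(void)
    {
        uint32_t eeprom_addr = 0x6DA01;                   /* XYZ = 110b, A15:A0 = DA01h */
        uint8_t i2c_addr  = SKETCH_I2C_ADDR(eeprom_addr); /* 1010 110b = 0x56 */
        uint8_t offset_hi = (eeprom_addr >> 8) & 0xff;    /* 0xDA */
        uint8_t offset_lo = eeprom_addr & 0xff;           /* 0x01 */

        printf("i2c addr 0x%02X, offset bytes 0x%02X 0x%02X\n",
               i2c_addr, offset_hi, offset_lo);
        return 0;
    }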
+
+static int __amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
+ u8 *eeprom_buf, u32 buf_size, bool read)
+{
+ u8 eeprom_offset_buf[EEPROM_OFFSET_SIZE];
+ struct i2c_msg msgs[] = {
+ {
+ .flags = 0,
+ .len = EEPROM_OFFSET_SIZE,
+ .buf = eeprom_offset_buf,
+ },
+ {
+ .flags = read ? I2C_M_RD : 0,
+ },
+ };
+ const u8 *p = eeprom_buf;
+ int r;
+ u16 len;
+
+ for (r = 0; buf_size > 0;
+ buf_size -= len, eeprom_addr += len, eeprom_buf += len) {
+ /* Set the EEPROM address we want to write to/read from.
+ */
+ msgs[0].addr = MAKE_I2C_ADDR(eeprom_addr);
+ msgs[1].addr = msgs[0].addr;
+ msgs[0].buf[0] = (eeprom_addr >> 8) & 0xff;
+ msgs[0].buf[1] = eeprom_addr & 0xff;
+
+ if (!read) {
+ /* Write the maximum amount of data, without
+ * crossing the device's page boundary, as per
+ * its spec. Partial page writes are allowed,
+ * starting at any location within the page,
+ * so long as the page boundary isn't crossed
+ * over (actually the page pointer rolls
+ * over).
+ *
+ * As per the AT24CM02 EEPROM spec, after
+ * writing into a page, the I2C driver should
+ * terminate the transfer, i.e. in
+ * "i2c_transfer()" below, with a STOP
+ * condition, so that the self-timed write
+ * cycle begins. This is implied for the
+ * "i2c_transfer()" abstraction.
+ */
+ len = min(EEPROM_PAGE_SIZE - (eeprom_addr & EEPROM_PAGE_MASK),
+ buf_size);
+ } else {
+ /* Reading from the EEPROM has no limitation
+ * on the number of bytes read from the EEPROM
+ * device--they are simply sequenced out.
+ * Keep in mind that i2c_msg.len is u16 type.
+ */
+ len = min(U16_MAX, buf_size);
+ }
+ msgs[1].len = len;
+ msgs[1].buf = eeprom_buf;
+
+ /* This constitutes a START-STOP transaction.
+ */
+ r = i2c_transfer(i2c_adap, msgs, ARRAY_SIZE(msgs));
+ if (r != ARRAY_SIZE(msgs))
+ break;
+
+ if (!read) {
+ /* According to EEPROM specs the length of the
+ * self-writing cycle, tWR (tW), is 10 ms.
+ *
+ * TODO: Use polling on ACK, aka Acknowledge
+ * Polling, to minimize waiting for the
+ * internal write cycle to complete, as it is
+ * usually smaller than tWR (tW).
+ */
+ msleep(10);
+ }
+ }
+
+ return r < 0 ? r : eeprom_buf - p;
+}
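To make the page-boundary clamp in the write path above concrete, here is a standalone sketch (not driver code) of the same chunking arithmetic: with the 256-byte page size defined earlier, a hypothetical 64-byte write starting at offset 0x1F0 goes out as a 16-byte chunk up to the boundary, then a 48-byte chunk from 0x200.

    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_PAGE_SIZE 256u
    #define SKETCH_PAGE_MASK (SKETCH_PAGE_SIZE - 1)

    int main(void)
    {
        uint32_t addr = 0x1F0;      /* hypothetical start offset */
        uint32_t remaining = 64;    /* hypothetical write length */

        while (remaining) {
            /* same clamp as the driver: never cross the page boundary */
            uint32_t len = SKETCH_PAGE_SIZE - (addr & SKETCH_PAGE_MASK);

            if (len > remaining)
                len = remaining;
            printf("chunk at 0x%05X, %u bytes\n", addr, len);
            addr += len;
            remaining -= len;
        }
        return 0;
    }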
+
+/**
+ * amdgpu_eeprom_xfer -- Read/write from/to an I2C EEPROM device
+ * @i2c_adap: pointer to the I2C adapter to use
+ * @eeprom_addr: EEPROM address from which to read/write
+ * @eeprom_buf: pointer to data buffer to read into/write from
+ * @buf_size: the size of @eeprom_buf
+ * @read: True if reading from the EEPROM, false if writing
+ *
+ * Returns the number of bytes read/written; -errno on error.
+ */
+static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
+ u8 *eeprom_buf, u32 buf_size, bool read)
+{
+ const struct i2c_adapter_quirks *quirks = i2c_adap->quirks;
+ u16 limit;
+ u16 ps; /* Partial size */
+ int res = 0, r;
+
+ if (!quirks)
+ limit = 0;
+ else if (read)
+ limit = quirks->max_read_len;
+ else
+ limit = quirks->max_write_len;
+
+ if (limit == 0) {
+ return __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr,
+ eeprom_buf, buf_size, read);
+ } else if (limit <= EEPROM_OFFSET_SIZE) {
+ dev_err_ratelimited(&i2c_adap->dev,
+ "maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d",
+ eeprom_addr, buf_size,
+ str_read_write(read), EEPROM_OFFSET_SIZE);
+ return -EINVAL;
+ }
+
+ /* The "limit" includes all data bytes sent/received,
+ * which would include the EEPROM_OFFSET_SIZE bytes.
+ * Account for them here.
+ */
+ limit -= EEPROM_OFFSET_SIZE;
+ for ( ; buf_size > 0;
+ buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) {
+ ps = min(limit, buf_size);
+
+ r = __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr,
+ eeprom_buf, ps, read);
+ if (r < 0)
+ return r;
+ res += r;
+ }
+
+ return res;
+}
+
+int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
+ u32 eeprom_addr, u8 *eeprom_buf,
+ u32 bytes)
+{
+ return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
+ true);
+}
+
+int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
+ u32 eeprom_addr, u8 *eeprom_buf,
+ u32 bytes)
+{
+ return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
+ false);
+}
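A brief usage sketch for the two wrappers above, from a caller's point of view; the adapter pointer and the 0x54000 offset are placeholders, not values taken from this patch. Both helpers return the number of bytes transferred, so callers need to treat a short result as an error.

    /* Illustrative only; assumes amdgpu_eeprom.h is included. */
    static int sketch_read_eeprom_header(struct i2c_adapter *adap,
                                         u8 *buf, u32 len)
    {
        int res = amdgpu_eeprom_read(adap, 0x54000, buf, len);

        if (res < 0)
            return res;     /* I2C transfer failed */
        if (res < len)
            return -EIO;    /* short read */
        return 0;
    }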
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h
new file mode 100644
index 000000000000..8083b8253ef4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eeprom.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AMDGPU_EEPROM_H
+#define _AMDGPU_EEPROM_H
+
+#include <linux/i2c.h>
+
+int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
+ u32 eeprom_addr, u8 *eeprom_buf,
+ u32 bytes);
+
+int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
+ u32 eeprom_addr, u8 *eeprom_buf,
+ u32 bytes);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index 94138abe093b..3aaeed2d3562 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -23,30 +23,32 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_connectors.h"
+#include "amdgpu_display.h"
#include "atom.h"
#include "atombios_encoders.h"
void
amdgpu_link_encoder_connector(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
+ drm_connector_list_iter_begin(dev, &iter);
/* walk the list and link encoders to connectors */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
amdgpu_encoder = to_amdgpu_encoder(encoder);
if (amdgpu_encoder->devices & amdgpu_connector->devices) {
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_encoder(connector, encoder);
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
amdgpu_atombios_encoder_init_backlight(amdgpu_encoder, connector);
adev->mode_info.bl_encoder = amdgpu_encoder;
@@ -54,6 +56,7 @@ amdgpu_link_encoder_connector(struct drm_device *dev)
}
}
}
+ drm_connector_list_iter_end(&iter);
}
void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
@@ -61,16 +64,20 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder)
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+
amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices;
DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
amdgpu_encoder->active_device, amdgpu_encoder->devices,
amdgpu_connector->devices, encoder->encoder_type);
}
}
+ drm_connector_list_iter_end(&iter);
}
struct drm_connector *
@@ -78,15 +85,20 @@ amdgpu_get_connector_for_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector;
+ struct drm_connector *connector, *found = NULL;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_encoder->active_device & amdgpu_connector->devices)
- return connector;
+ if (amdgpu_encoder->active_device & amdgpu_connector->devices) {
+ found = connector;
+ break;
+ }
}
- return NULL;
+ drm_connector_list_iter_end(&iter);
+ return found;
}
struct drm_connector *
@@ -94,15 +106,20 @@ amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector;
+ struct drm_connector *connector, *found = NULL;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_encoder->devices & amdgpu_connector->devices)
- return connector;
+ if (amdgpu_encoder->devices & amdgpu_connector->devices) {
+ found = connector;
+ break;
+ }
}
- return NULL;
+ drm_connector_list_iter_end(&iter);
+ return found;
}
struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder)
@@ -149,12 +166,12 @@ void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
{
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
- unsigned hblank = native_mode->htotal - native_mode->hdisplay;
- unsigned vblank = native_mode->vtotal - native_mode->vdisplay;
- unsigned hover = native_mode->hsync_start - native_mode->hdisplay;
- unsigned vover = native_mode->vsync_start - native_mode->vdisplay;
- unsigned hsync_width = native_mode->hsync_end - native_mode->hsync_start;
- unsigned vsync_width = native_mode->vsync_end - native_mode->vsync_start;
+ unsigned int hblank = native_mode->htotal - native_mode->hdisplay;
+ unsigned int vblank = native_mode->vtotal - native_mode->vdisplay;
+ unsigned int hover = native_mode->hsync_start - native_mode->hdisplay;
+ unsigned int vover = native_mode->vsync_start - native_mode->vdisplay;
+ unsigned int hsync_width = native_mode->hsync_end - native_mode->hsync_start;
+ unsigned int vsync_width = native_mode->vsync_end - native_mode->vsync_start;
adjusted_mode->clock = native_mode->clock;
adjusted_mode->flags = native_mode->flags;
@@ -205,7 +222,7 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
case DRM_MODE_CONNECTOR_HDMIB:
if (amdgpu_connector->use_digital) {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
if (pixel_clock > 340000)
return true;
else
@@ -227,7 +244,7 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
return false;
else {
/* HDMI 1.3 supports up to 340 Mhz over single link */
- if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector))) {
+ if (connector->display_info.is_hdmi) {
if (pixel_clock > 340000)
return true;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
new file mode 100644
index 000000000000..23d7d0b0d625
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2024 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/sched.h>
+#include <drm/drm_exec.h>
+#include "amdgpu.h"
+
+#define work_to_evf_mgr(w, name) container_of(w, struct amdgpu_eviction_fence_mgr, name)
+#define evf_mgr_to_fpriv(e) container_of(e, struct amdgpu_fpriv, evf_mgr)
+
+static const char *
+amdgpu_eviction_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "amdgpu_eviction_fence";
+}
+
+static const char *
+amdgpu_eviction_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_eviction_fence *ef;
+
+ ef = container_of(f, struct amdgpu_eviction_fence, base);
+ return ef->timeline_name;
+}
+
+int
+amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec)
+{
+ struct amdgpu_eviction_fence *old_ef, *new_ef;
+ struct drm_gem_object *obj;
+ unsigned long index;
+ int ret;
+
+ if (evf_mgr->ev_fence &&
+ !dma_fence_is_signaled(&evf_mgr->ev_fence->base))
+ return 0;
+ /*
+ * Steps to replace eviction fence:
+ * * lock all objects in exec (caller)
+ * * create a new eviction fence
+ * * update new eviction fence in evf_mgr
+ * * attach the new eviction fence to BOs
+ * * release the old fence
+ * * unlock the objects (caller)
+ */
+ new_ef = amdgpu_eviction_fence_create(evf_mgr);
+ if (!new_ef) {
+ DRM_ERROR("Failed to create new eviction fence\n");
+ return -ENOMEM;
+ }
+
+ /* Update the eviction fence now */
+ spin_lock(&evf_mgr->ev_fence_lock);
+ old_ef = evf_mgr->ev_fence;
+ evf_mgr->ev_fence = new_ef;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ /* Attach the new fence */
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ if (!bo)
+ continue;
+ ret = amdgpu_eviction_fence_attach(evf_mgr, bo);
+ if (ret) {
+ DRM_ERROR("Failed to attch new eviction fence\n");
+ goto free_err;
+ }
+ }
+
+ /* Free old fence */
+ if (old_ef)
+ dma_fence_put(&old_ef->base);
+ return 0;
+
+free_err:
+ kfree(new_ef);
+ return ret;
+}
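The step list in the comment above leaves locking to the caller. A hedged sketch of that caller pattern, assuming the drm_exec helpers with the three-argument drm_exec_init() found in current kernels; the bos array and count are hypothetical.

    static int sketch_replace_ev_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
                                       struct amdgpu_bo **bos, unsigned int count)
    {
        struct drm_exec exec;
        unsigned int i;
        int ret = 0;

        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&exec) {
            for (i = 0; i < count; i++) {
                /* step one of the comment: lock all objects in exec */
                ret = drm_exec_lock_obj(&exec, &bos[i]->tbo.base);
                drm_exec_retry_on_contention(&exec);
                if (ret)
                    goto out;
            }
        }

        /* create the new fence, attach it and drop the old one */
        ret = amdgpu_eviction_fence_replace_fence(evf_mgr, &exec);
    out:
        drm_exec_fini(&exec);   /* last step: unlock the objects */
        return ret;
    }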
+
+static void
+amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
+{
+ struct amdgpu_eviction_fence_mgr *evf_mgr = work_to_evf_mgr(work, suspend_work.work);
+ struct amdgpu_fpriv *fpriv = evf_mgr_to_fpriv(evf_mgr);
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_eviction_fence *ev_fence;
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ if (ev_fence)
+ dma_fence_get(&ev_fence->base);
+ else
+ goto unlock;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ amdgpu_userq_evict(uq_mgr, ev_fence);
+
+ mutex_unlock(&uq_mgr->userq_mutex);
+ dma_fence_put(&ev_fence->base);
+ return;
+
+unlock:
+ spin_unlock(&evf_mgr->ev_fence_lock);
+ mutex_unlock(&uq_mgr->userq_mutex);
+}
+
+static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
+{
+ struct amdgpu_eviction_fence_mgr *evf_mgr;
+ struct amdgpu_eviction_fence *ev_fence;
+
+ if (!f)
+ return true;
+
+ ev_fence = to_ev_fence(f);
+ evf_mgr = ev_fence->evf_mgr;
+
+ schedule_delayed_work(&evf_mgr->suspend_work, 0);
+ return true;
+}
+
+static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
+ .get_driver_name = amdgpu_eviction_fence_get_driver_name,
+ .get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
+ .enable_signaling = amdgpu_eviction_fence_enable_signaling,
+};
+
+void amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
+{
+ spin_lock(&evf_mgr->ev_fence_lock);
+ dma_fence_signal(&ev_fence->base);
+ spin_unlock(&evf_mgr->ev_fence_lock);
+}
+
+struct amdgpu_eviction_fence *
+amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+ ev_fence = kzalloc(sizeof(*ev_fence), GFP_KERNEL);
+ if (!ev_fence)
+ return NULL;
+
+ ev_fence->evf_mgr = evf_mgr;
+ get_task_comm(ev_fence->timeline_name, current);
+ spin_lock_init(&ev_fence->lock);
+ dma_fence_init64(&ev_fence->base, &amdgpu_eviction_fence_ops,
+ &ev_fence->lock, evf_mgr->ev_fence_ctx,
+ atomic_inc_return(&evf_mgr->ev_fence_seq));
+ return ev_fence;
+}
+
+void amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+ /* Wait for any pending work to execute */
+ flush_delayed_work(&evf_mgr->suspend_work);
+
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ if (!ev_fence)
+ return;
+
+ dma_fence_wait(&ev_fence->base, false);
+
+ /* Last unref of ev_fence */
+ dma_fence_put(&ev_fence->base);
+}
+
+int amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+ struct dma_resv *resv = bo->tbo.base.resv;
+ int ret;
+
+ if (!resv)
+ return 0;
+
+ ret = dma_resv_reserve_fences(resv, 1);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Failed to reserve fence space\n");
+ return ret;
+ }
+
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ if (ev_fence)
+ dma_resv_add_fence(resv, &ev_fence->base, DMA_RESV_USAGE_BOOKKEEP);
+ spin_unlock(&evf_mgr->ev_fence_lock);
+
+ return 0;
+}
+
+void amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
+{
+ struct dma_fence *stub = dma_fence_get_stub();
+
+ dma_resv_replace_fences(bo->tbo.base.resv, evf_mgr->ev_fence_ctx,
+ stub, DMA_RESV_USAGE_BOOKKEEP);
+ dma_fence_put(stub);
+}
+
+int amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ /* This needs to be done one time per open */
+ atomic_set(&evf_mgr->ev_fence_seq, 0);
+ evf_mgr->ev_fence_ctx = dma_fence_context_alloc(1);
+ spin_lock_init(&evf_mgr->ev_fence_lock);
+
+ INIT_DELAYED_WORK(&evf_mgr->suspend_work, amdgpu_eviction_fence_suspend_worker);
+ return 0;
+}
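For the lifecycle the comment above alludes to: the release side is already visible in amdgpu_drm_release() earlier in this patch (fd_closing is set before amdgpu_eviction_fence_destroy()); the open side is assumed to call the init helper once per fpriv. A minimal sketch of that pairing:

    /* Sketch only; the real open/close paths do considerably more. */
    static int sketch_evf_open(struct amdgpu_fpriv *fpriv)
    {
        return amdgpu_eviction_fence_init(&fpriv->evf_mgr);
    }

    static void sketch_evf_close(struct amdgpu_fpriv *fpriv)
    {
        /* mirrors amdgpu_drm_release() above */
        fpriv->evf_mgr.fd_closing = true;
        amdgpu_eviction_fence_destroy(&fpriv->evf_mgr);
    }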
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
new file mode 100644
index 000000000000..fcd867b7147d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_EV_FENCE_H_
+#define AMDGPU_EV_FENCE_H_
+
+struct amdgpu_eviction_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+ char timeline_name[TASK_COMM_LEN];
+ struct amdgpu_eviction_fence_mgr *evf_mgr;
+};
+
+struct amdgpu_eviction_fence_mgr {
+ u64 ev_fence_ctx;
+ atomic_t ev_fence_seq;
+ spinlock_t ev_fence_lock;
+ struct amdgpu_eviction_fence *ev_fence;
+ struct delayed_work suspend_work;
+ uint8_t fd_closing;
+};
+
+/* Eviction fence helper functions */
+struct amdgpu_eviction_fence *
+amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+void
+amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+int
+amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+
+void
+amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+
+int
+amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+void
+amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
+
+int
+amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
deleted file mode 100644
index 236d9950221b..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ /dev/null
@@ -1,440 +0,0 @@
-/*
- * Copyright © 2007 David Airlie
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * David Airlie
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/pm_runtime.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/amdgpu_drm.h>
-#include "amdgpu.h"
-#include "cikd.h"
-
-#include <drm/drm_fb_helper.h>
-
-#include <linux/vga_switcheroo.h>
-
-/* object hierarchy -
- this contains a helper + a amdgpu fb
- the helper contains a pointer to amdgpu framebuffer baseclass.
-*/
-struct amdgpu_fbdev {
- struct drm_fb_helper helper;
- struct amdgpu_framebuffer rfb;
- struct amdgpu_device *adev;
-};
-
-static int
-amdgpufb_open(struct fb_info *info, int user)
-{
- struct amdgpu_fbdev *rfbdev = info->par;
- struct amdgpu_device *adev = rfbdev->adev;
- int ret = pm_runtime_get_sync(adev->ddev->dev);
- if (ret < 0 && ret != -EACCES) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return ret;
- }
- return 0;
-}
-
-static int
-amdgpufb_release(struct fb_info *info, int user)
-{
- struct amdgpu_fbdev *rfbdev = info->par;
- struct amdgpu_device *adev = rfbdev->adev;
-
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
- return 0;
-}
-
-static struct fb_ops amdgpufb_ops = {
- .owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
- .fb_open = amdgpufb_open,
- .fb_release = amdgpufb_release,
- .fb_fillrect = drm_fb_helper_cfb_fillrect,
- .fb_copyarea = drm_fb_helper_cfb_copyarea,
- .fb_imageblit = drm_fb_helper_cfb_imageblit,
-};
-
-
-int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
-{
- int aligned = width;
- int pitch_mask = 0;
-
- switch (cpp) {
- case 1:
- pitch_mask = 255;
- break;
- case 2:
- pitch_mask = 127;
- break;
- case 3:
- case 4:
- pitch_mask = 63;
- break;
- }
-
- aligned += pitch_mask;
- aligned &= ~pitch_mask;
- return aligned * cpp;
-}
-
-static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
-{
- struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
- int ret;
-
- ret = amdgpu_bo_reserve(abo, true);
- if (likely(ret == 0)) {
- amdgpu_bo_kunmap(abo);
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
- drm_gem_object_unreference_unlocked(gobj);
-}
-
-static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
- struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object **gobj_p)
-{
- struct amdgpu_device *adev = rfbdev->adev;
- struct drm_gem_object *gobj = NULL;
- struct amdgpu_bo *abo = NULL;
- bool fb_tiled = false; /* useful for testing */
- u32 tiling_flags = 0;
- int ret;
- int aligned_size, size;
- int height = mode_cmd->height;
- u32 cpp;
-
- cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
-
- /* need to align pitch with crtc limits */
- mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
- fb_tiled);
-
- height = ALIGN(mode_cmd->height, 8);
- size = mode_cmd->pitches[0] * height;
- aligned_size = ALIGN(size, PAGE_SIZE);
- ret = amdgpu_gem_object_create(adev, aligned_size, 0,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_VRAM_CLEARED,
- true, &gobj);
- if (ret) {
- pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
- return -ENOMEM;
- }
- abo = gem_to_amdgpu_bo(gobj);
-
- if (fb_tiled)
- tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1);
-
- ret = amdgpu_bo_reserve(abo, false);
- if (unlikely(ret != 0))
- goto out_unref;
-
- if (tiling_flags) {
- ret = amdgpu_bo_set_tiling_flags(abo,
- tiling_flags);
- if (ret)
- dev_err(adev->dev, "FB failed to set tiling flags\n");
- }
-
-
- ret = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, NULL);
- if (ret) {
- amdgpu_bo_unreserve(abo);
- goto out_unref;
- }
- ret = amdgpu_bo_kmap(abo, NULL);
- amdgpu_bo_unreserve(abo);
- if (ret) {
- goto out_unref;
- }
-
- *gobj_p = gobj;
- return 0;
-out_unref:
- amdgpufb_destroy_pinned_object(gobj);
- *gobj_p = NULL;
- return ret;
-}
-
-static int amdgpufb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper;
- struct amdgpu_device *adev = rfbdev->adev;
- struct fb_info *info;
- struct drm_framebuffer *fb = NULL;
- struct drm_mode_fb_cmd2 mode_cmd;
- struct drm_gem_object *gobj = NULL;
- struct amdgpu_bo *abo = NULL;
- int ret;
- unsigned long tmp;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
-
- if (sizes->surface_bpp == 24)
- sizes->surface_bpp = 32;
-
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon object %d\n", ret);
- return ret;
- }
-
- abo = gem_to_amdgpu_bo(gobj);
-
- /* okay we have an object now allocate the framebuffer */
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto out;
- }
-
- info->par = rfbdev;
- info->skip_vt_switch = true;
-
- ret = amdgpu_framebuffer_init(adev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
- if (ret) {
- DRM_ERROR("failed to initialize framebuffer %d\n", ret);
- goto out;
- }
-
- fb = &rfbdev->rfb.base;
-
- /* setup helper */
- rfbdev->helper.fb = fb;
-
- strcpy(info->fix.id, "amdgpudrmfb");
-
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
-
- info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
- info->fbops = &amdgpufb_ops;
-
- tmp = amdgpu_bo_gpu_offset(abo) - adev->mc.vram_start;
- info->fix.smem_start = adev->mc.aper_base + tmp;
- info->fix.smem_len = amdgpu_bo_size(abo);
- info->screen_base = abo->kptr;
- info->screen_size = amdgpu_bo_size(abo);
-
- drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
-
- /* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
- info->apertures->ranges[0].size = adev->mc.aper_size;
-
- /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-
- if (info->screen_base == NULL) {
- ret = -ENOSPC;
- goto out;
- }
-
- DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
- DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base);
- DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo));
- DRM_INFO("fb depth is %d\n", fb->format->depth);
- DRM_INFO(" pitch is %d\n", fb->pitches[0]);
-
- vga_switcheroo_client_fb_set(adev->ddev->pdev, info);
- return 0;
-
-out:
- if (abo) {
-
- }
- if (fb && ret) {
- drm_gem_object_unreference_unlocked(gobj);
- drm_framebuffer_unregister_private(fb);
- drm_framebuffer_cleanup(fb);
- kfree(fb);
- }
- return ret;
-}
-
-void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev)
-{
- if (adev->mode_info.rfbdev)
- drm_fb_helper_hotplug_event(&adev->mode_info.rfbdev->helper);
-}
-
-static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev)
-{
- struct amdgpu_framebuffer *rfb = &rfbdev->rfb;
-
- drm_fb_helper_unregister_fbi(&rfbdev->helper);
-
- if (rfb->obj) {
- amdgpufb_destroy_pinned_object(rfb->obj);
- rfb->obj = NULL;
- }
- drm_fb_helper_fini(&rfbdev->helper);
- drm_framebuffer_unregister_private(&rfb->base);
- drm_framebuffer_cleanup(&rfb->base);
-
- return 0;
-}
-
-/** Sets the color ramps on behalf of fbcon */
-static void amdgpu_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
- u16 blue, int regno)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- amdgpu_crtc->lut_r[regno] = red >> 6;
- amdgpu_crtc->lut_g[regno] = green >> 6;
- amdgpu_crtc->lut_b[regno] = blue >> 6;
-}
-
-/** Gets the color ramps on behalf of fbcon */
-static void amdgpu_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, int regno)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- *red = amdgpu_crtc->lut_r[regno] << 6;
- *green = amdgpu_crtc->lut_g[regno] << 6;
- *blue = amdgpu_crtc->lut_b[regno] << 6;
-}
-
-static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
- .gamma_set = amdgpu_crtc_fb_gamma_set,
- .gamma_get = amdgpu_crtc_fb_gamma_get,
- .fb_probe = amdgpufb_create,
-};
-
-int amdgpu_fbdev_init(struct amdgpu_device *adev)
-{
- struct amdgpu_fbdev *rfbdev;
- int bpp_sel = 32;
- int ret;
-
- /* don't init fbdev on hw without DCE */
- if (!adev->mode_info.mode_config_initialized)
- return 0;
-
- /* don't init fbdev if there are no connectors */
- if (list_empty(&adev->ddev->mode_config.connector_list))
- return 0;
-
- /* select 8 bpp console on low vram cards */
- if (adev->mc.real_vram_size <= (32*1024*1024))
- bpp_sel = 8;
-
- rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL);
- if (!rfbdev)
- return -ENOMEM;
-
- rfbdev->adev = adev;
- adev->mode_info.rfbdev = rfbdev;
-
- drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
- &amdgpu_fb_helper_funcs);
-
- ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
- AMDGPUFB_CONN_LIMIT);
- if (ret) {
- kfree(rfbdev);
- return ret;
- }
-
- drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(adev->ddev);
-
- drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
- return 0;
-}
-
-void amdgpu_fbdev_fini(struct amdgpu_device *adev)
-{
- if (!adev->mode_info.rfbdev)
- return;
-
- amdgpu_fbdev_destroy(adev->ddev, adev->mode_info.rfbdev);
- kfree(adev->mode_info.rfbdev);
- adev->mode_info.rfbdev = NULL;
-}
-
-void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
-{
- if (adev->mode_info.rfbdev)
- drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
- state);
-}
-
-int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
-{
- struct amdgpu_bo *robj;
- int size = 0;
-
- if (!adev->mode_info.rfbdev)
- return 0;
-
- robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj);
- size += amdgpu_bo_size(robj);
- return size;
-}
-
-bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
-{
- if (!adev->mode_info.rfbdev)
- return false;
- if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj))
- return true;
- return false;
-}
-
-void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
-{
- struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
- struct drm_fb_helper *fb_helper;
- int ret;
-
- if (!afbdev)
- return;
-
- fb_helper = &afbdev->helper;
-
- ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
- if (ret)
- DRM_DEBUG("failed to restore crtc mode\n");
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
new file mode 100644
index 000000000000..b349bb3676d5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: MIT
+/* Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: David Nieto
+ * Roy Sun
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/reboot.h>
+#include <linux/syscalls.h>
+
+#include <drm/amdgpu_drm.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_gem.h"
+#include "amdgpu_ctx.h"
+#include "amdgpu_fdinfo.h"
+
+
+static const char *amdgpu_ip_name[AMDGPU_HW_IP_NUM] = {
+ [AMDGPU_HW_IP_GFX] = "gfx",
+ [AMDGPU_HW_IP_COMPUTE] = "compute",
+ [AMDGPU_HW_IP_DMA] = "dma",
+ [AMDGPU_HW_IP_UVD] = "dec",
+ [AMDGPU_HW_IP_VCE] = "enc",
+ [AMDGPU_HW_IP_UVD_ENC] = "enc_1",
+ [AMDGPU_HW_IP_VCN_DEC] = "dec",
+ [AMDGPU_HW_IP_VCN_ENC] = "enc",
+ [AMDGPU_HW_IP_VCN_JPEG] = "jpeg",
+ [AMDGPU_HW_IP_VPE] = "vpe",
+};
+
+void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file)
+{
+ struct amdgpu_fpriv *fpriv = file->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
+ ktime_t usage[AMDGPU_HW_IP_NUM];
+ const char *pl_name[] = {
+ [TTM_PL_VRAM] = "vram",
+ [TTM_PL_TT] = "gtt",
+ [TTM_PL_SYSTEM] = "cpu",
+ [AMDGPU_PL_GDS] = "gds",
+ [AMDGPU_PL_GWS] = "gws",
+ [AMDGPU_PL_OA] = "oa",
+ [AMDGPU_PL_DOORBELL] = "doorbell",
+ [AMDGPU_PL_MMIO_REMAP] = "mmioremap",
+ };
+ unsigned int hw_ip, i;
+
+ amdgpu_vm_get_memory(vm, stats);
+ amdgpu_ctx_mgr_usage(&fpriv->ctx_mgr, usage);
+
+ /*
+ * ******************************************************************
+ * For text output format description please see drm-usage-stats.rst!
+ * ******************************************************************
+ */
+
+ drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid);
+
+ for (i = 0; i < ARRAY_SIZE(pl_name); i++) {
+ if (!pl_name[i])
+ continue;
+
+ drm_print_memory_stats(p,
+ &stats[i].drm,
+ DRM_GEM_OBJECT_RESIDENT |
+ DRM_GEM_OBJECT_PURGEABLE,
+ pl_name[i]);
+ }
+
+ /* Legacy amdgpu keys, alias to drm-resident-memory-: */
+ drm_printf(p, "drm-memory-vram:\t%llu KiB\n",
+ stats[TTM_PL_VRAM].drm.resident/1024UL);
+ drm_printf(p, "drm-memory-gtt: \t%llu KiB\n",
+ stats[TTM_PL_TT].drm.resident/1024UL);
+ drm_printf(p, "drm-memory-cpu: \t%llu KiB\n",
+ stats[TTM_PL_SYSTEM].drm.resident/1024UL);
+
+ /* Amdgpu specific memory accounting keys: */
+ drm_printf(p, "amd-evicted-vram:\t%llu KiB\n",
+ stats[TTM_PL_VRAM].evicted/1024UL);
+ drm_printf(p, "amd-requested-vram:\t%llu KiB\n",
+ (stats[TTM_PL_VRAM].drm.shared +
+ stats[TTM_PL_VRAM].drm.private) / 1024UL);
+ drm_printf(p, "amd-requested-gtt:\t%llu KiB\n",
+ (stats[TTM_PL_TT].drm.shared +
+ stats[TTM_PL_TT].drm.private) / 1024UL);
+
+ for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
+ if (!usage[hw_ip])
+ continue;
+
+ drm_printf(p, "drm-engine-%s:\t%lld ns\n", amdgpu_ip_name[hw_ip],
+ ktime_to_ns(usage[hw_ip]));
+ }
+}
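These keys surface through the standard DRM fdinfo interface, i.e. /proc/<pid>/fdinfo/<fd> for a descriptor opened on the card or render node; the DRM core prepends its common drm-driver/drm-client-id keys before calling this hook. A purely illustrative excerpt (all numbers invented) of the driver-specific lines might look like:

    pasid:              32770
    drm-memory-vram:    262144 KiB
    drm-memory-gtt:     65536 KiB
    drm-memory-cpu:     0 KiB
    amd-evicted-vram:   0 KiB
    amd-requested-vram: 262144 KiB
    amd-requested-gtt:  65536 KiB
    drm-engine-gfx:     1234567890 ns

plus the per-placement drm-*-<region> lines emitted by drm_print_memory_stats() for each region named in pl_name[].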
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
new file mode 100644
index 000000000000..0398f5a159ef
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: David Nieto
+ * Roy Sun
+ */
+#ifndef __AMDGPU_SMI_H__
+#define __AMDGPU_SMI_H__
+
+#include <linux/idr.h>
+#include <linux/kfifo.h>
+#include <linux/rbtree.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/drm_file.h>
+#include <linux/sched/mm.h>
+
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_ids.h"
+
+uint32_t amdgpu_get_ip_count(struct amdgpu_device *adev, int id);
+void amdgpu_show_fdinfo(struct drm_printer *p, struct drm_file *file);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 7b60fb79c3a6..fd8cca241da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -34,52 +34,24 @@
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
+#include "amdgpu_reset.h"
/*
- * Fences
- * Fences mark an event in the GPUs pipeline and are used
- * for GPU/CPU synchronization. When the fence is written,
- * it is expected that all buffers associated with that fence
- * are no longer in use by the associated ring on the GPU and
- * that the the relevant GPU caches have been flushed.
- */
-
-struct amdgpu_fence {
- struct dma_fence base;
-
- /* RB, DMA, etc. */
- struct amdgpu_ring *ring;
-};
-
-static struct kmem_cache *amdgpu_fence_slab;
-
-int amdgpu_fence_slab_init(void)
-{
- amdgpu_fence_slab = kmem_cache_create(
- "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!amdgpu_fence_slab)
- return -ENOMEM;
- return 0;
-}
-
-void amdgpu_fence_slab_fini(void)
-{
- rcu_barrier();
- kmem_cache_destroy(amdgpu_fence_slab);
-}
-/*
* Cast helper
*/
static const struct dma_fence_ops amdgpu_fence_ops;
+static const struct dma_fence_ops amdgpu_job_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);
- if (__f->base.ops == &amdgpu_fence_ops)
+ if (__f->base.ops == &amdgpu_fence_ops ||
+ __f->base.ops == &amdgpu_job_fence_ops)
return __f;
return NULL;
@@ -127,43 +99,110 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
*
* @ring: ring the fence is associated with
* @f: resulting fence object
+ * @af: amdgpu fence input
+ * @flags: flags to pass into the subordinate .emit_fence() call
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f)
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+ struct amdgpu_fence *af, unsigned int flags)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_fence *fence;
- struct dma_fence *old, **ptr;
+ struct dma_fence *fence;
+ struct amdgpu_fence *am_fence;
+ struct dma_fence __rcu **ptr;
uint32_t seq;
+ int r;
- fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
- if (fence == NULL)
- return -ENOMEM;
+ if (!af) {
+ /* create a separate hw fence */
+ am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL);
+ if (!am_fence)
+ return -ENOMEM;
+ } else {
+ am_fence = af;
+ }
+ fence = &am_fence->base;
+ am_fence->ring = ring;
seq = ++ring->fence_drv.sync_seq;
- fence->ring = ring;
- dma_fence_init(&fence->base, &amdgpu_fence_ops,
- &ring->fence_drv.lock,
- adev->fence_context + ring->idx,
- seq);
- amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
- seq, AMDGPU_FENCE_FLAG_INT);
+ am_fence->seq = seq;
+ if (af) {
+ dma_fence_init(fence, &amdgpu_job_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
+ /* Against remove in amdgpu_job_{free, free_cb} */
+ dma_fence_get(fence);
+ } else {
+ dma_fence_init(fence, &amdgpu_fence_ops,
+ &ring->fence_drv.lock,
+ adev->fence_context + ring->idx, seq);
+ }
+ amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
+ seq, flags | AMDGPU_FENCE_FLAG_INT);
+ amdgpu_fence_save_wptr(fence);
+ pm_runtime_get_noresume(adev_to_drm(adev)->dev);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+ if (unlikely(rcu_dereference_protected(*ptr, 1))) {
+ struct dma_fence *old;
+
+ rcu_read_lock();
+ old = dma_fence_get_rcu_safe(ptr);
+ rcu_read_unlock();
+
+ if (old) {
+ r = dma_fence_wait(old, false);
+ dma_fence_put(old);
+ if (r)
+ return r;
+ }
+ }
+
+ to_amdgpu_fence(fence)->start_timestamp = ktime_get();
+
/* This function can't be called concurrently anyway, otherwise
* emitting the fence would mess up the hardware ring buffer.
*/
- old = rcu_dereference_protected(*ptr, 1);
- if (old && !dma_fence_is_signaled(old)) {
- DRM_INFO("rcu slot is busy\n");
- dma_fence_wait(old, false);
- }
+ rcu_assign_pointer(*ptr, dma_fence_get(fence));
+
+ *f = fence;
+
+ return 0;
+}
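A minimal sketch of the two call patterns the reworked amdgpu_fence_emit() signature supports, assuming a ring and a job already prepared by the caller; illustrative only, not part of the patch, and the flags value of 0 is an assumption:

	struct dma_fence *f;
	int r;

	/* Job submission path: the fence storage is embedded in the job. */
	r = amdgpu_fence_emit(ring, &f, &job->hw_fence, 0);

	/* Internal submission without a job: a standalone fence is kzalloc'ed. */
	r = amdgpu_fence_emit(ring, &f, NULL, 0);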
+
+/**
+ * amdgpu_fence_emit_polling - emit a fence on the requested ring
+ *
+ * @ring: ring the fence is associated with
+ * @s: resulting sequence number
+ * @timeout: the timeout for waiting in usecs
+ *
+ * Emits a fence command on the requested ring (all asics).
+ * Used for fence polling.
+ * Returns 0 on success, -EINVAL or -ETIMEDOUT on failure.
+ */
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
+ uint32_t timeout)
+{
+ uint32_t seq;
+ signed long r;
- rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
+ if (!s)
+ return -EINVAL;
- *f = &fence->base;
+ seq = ++ring->fence_drv.sync_seq;
+ r = amdgpu_fence_wait_polling(ring,
+ seq - ring->fence_drv.num_fences_mask,
+ timeout);
+ if (r < 1)
+ return -ETIMEDOUT;
+
+ amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
+ seq, 0);
+
+ *s = seq;
return 0;
}
@@ -189,12 +228,14 @@ static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
* Checks the current fence value and calculates the last
* signalled fence value. Wakes the fence queue if the
* sequence number has increased.
+ *
+ * Returns true if fence was processed
*/
-void amdgpu_fence_process(struct amdgpu_ring *ring)
+bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
struct amdgpu_fence_driver *drv = &ring->fence_drv;
+ struct amdgpu_device *adev = ring->adev;
uint32_t seq, last_seq;
- int r;
do {
last_seq = atomic_read(&ring->fence_drv.last_seq);
@@ -202,17 +243,19 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);
- if (seq != ring->fence_drv.sync_seq)
+ if (timer_delete(&ring->fence_drv.fallback_timer) &&
+ seq != ring->fence_drv.sync_seq)
amdgpu_fence_schedule_fallback(ring);
if (unlikely(seq == last_seq))
- return;
+ return false;
last_seq &= drv->num_fences_mask;
seq &= drv->num_fences_mask;
do {
struct dma_fence *fence, **ptr;
+ struct amdgpu_fence *am_fence;
++last_seq;
last_seq &= drv->num_fences_mask;
@@ -225,34 +268,42 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
- r = dma_fence_signal(fence);
- if (!r)
- DMA_FENCE_TRACE(fence, "signaled from irq context\n");
- else
- BUG();
-
+ /* Save the wptr in the fence driver so we know what the last processed
+ * wptr was. This is required for re-emitting the ring state for
+ * queues that are reset but are not guilty and thus have no guilty fence.
+ */
+ am_fence = container_of(fence, struct amdgpu_fence, base);
+ drv->signalled_wptr = am_fence->wptr;
+ dma_fence_signal(fence);
dma_fence_put(fence);
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
} while (last_seq != seq);
+
+ return true;
}
/**
* amdgpu_fence_fallback - fallback for hardware interrupts
*
- * @work: delayed work item
+ * @t: timer context used to obtain the pointer to ring structure
*
* Checks for fence activity.
*/
-static void amdgpu_fence_fallback(unsigned long arg)
+static void amdgpu_fence_fallback(struct timer_list *t)
{
- struct amdgpu_ring *ring = (void *)arg;
+ struct amdgpu_ring *ring = timer_container_of(ring, t,
+ fence_drv.fallback_timer);
- amdgpu_fence_process(ring);
+ if (amdgpu_fence_process(ring))
+ dev_warn(ring->adev->dev,
+ "Fence fallback timer expired on ring %s\n",
+ ring->name);
}
/**
* amdgpu_fence_wait_empty - wait for all fences to signal
*
- * @adev: amdgpu device pointer
* @ring: ring index the fence is associated with
*
* Wait for all fences on the requested ring to signal (all asics).
@@ -260,7 +311,7 @@ static void amdgpu_fence_fallback(unsigned long arg)
*/
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
- uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq);
+ uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
struct dma_fence *fence, **ptr;
int r;
@@ -282,6 +333,27 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
}
/**
+ * amdgpu_fence_wait_polling - busy wait for a given sequence number
+ *
+ * @ring: ring index the fence is associated with
+ * @wait_seq: sequence number to wait for
+ * @timeout: the timeout for waiting in usecs
+ *
+ * Busy-wait until the given sequence number has been signaled on the requested
+ * ring (all asics). Returns the remaining time if it signaled, 0 on timeout.
+ */
+signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
+ uint32_t wait_seq,
+ signed long timeout)
+{
+
+ while ((int32_t)(wait_seq - amdgpu_fence_read(ring)) > 0 && timeout > 0) {
+ udelay(2);
+ timeout -= 2;
+ }
+ return timeout > 0 ? timeout : 0;
+}
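A hedged sketch of how the polling emit/wait pair above is meant to be used when fence interrupts are unavailable; the ring allocation size and the 2000 usec timeout are assumptions about the caller, not values mandated by this patch:

	uint32_t seq;
	signed long left;
	int r;

	r = amdgpu_ring_alloc(ring, 32);	/* room for the fence packet, size assumed */
	if (r)
		return r;

	r = amdgpu_fence_emit_polling(ring, &seq, 2000);
	if (r) {
		amdgpu_ring_undo(ring);
		return r;
	}
	amdgpu_ring_commit(ring);

	left = amdgpu_fence_wait_polling(ring, seq, 2000);
	if (left <= 0)
		return -ETIMEDOUT;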
+/**
* amdgpu_fence_count_emitted - get the count of emitted fences
*
* @ring: ring the fence is associated with
@@ -290,21 +362,71 @@ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
* Returns the number of emitted fences on the ring. Used by the
* dynpm code to track ring activity.
*/
-unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
+unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
uint64_t emitted;
/* We are not protected by ring lock when reading the last sequence
* but it's ok to report slightly wrong fence count here.
*/
- amdgpu_fence_process(ring);
emitted = 0x100000000ull;
emitted -= atomic_read(&ring->fence_drv.last_seq);
- emitted += ACCESS_ONCE(ring->fence_drv.sync_seq);
+ emitted += READ_ONCE(ring->fence_drv.sync_seq);
return lower_32_bits(emitted);
}
/**
+ * amdgpu_fence_last_unsignaled_time_us - time since the earliest unsignaled fence was emitted
+ * @ring: ring the fence is associated with
+ *
+ * Find the earliest fence that is still unsignaled and calculate the time
+ * delta between when it was emitted and now.
+ */
+u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring)
+{
+ struct amdgpu_fence_driver *drv = &ring->fence_drv;
+ struct dma_fence *fence;
+ uint32_t last_seq, sync_seq;
+
+ last_seq = atomic_read(&ring->fence_drv.last_seq);
+ sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
+ if (last_seq == sync_seq)
+ return 0;
+
+ ++last_seq;
+ last_seq &= drv->num_fences_mask;
+ fence = drv->fences[last_seq];
+ if (!fence)
+ return 0;
+
+ return ktime_us_delta(ktime_get(),
+ to_amdgpu_fence(fence)->start_timestamp);
+}
+
+/**
+ * amdgpu_fence_update_start_timestamp - update the timestamp of the fence
+ * @ring: ring the fence is associated with
+ * @seq: the fence seq number to update.
+ * @timestamp: the start timestamp to update.
+ *
+ * This function is called when the fence and its related IB are about to be
+ * resubmitted to the GPU in the MCBP (mid-command-buffer preemption) scenario,
+ * so a race with amdgpu_fence_process() modifying the same fence is not a concern.
+ */
+void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ktime_t timestamp)
+{
+ struct amdgpu_fence_driver *drv = &ring->fence_drv;
+ struct dma_fence *fence;
+
+ seq &= drv->num_fences_mask;
+ fence = drv->fences[seq];
+ if (!fence)
+ return;
+
+ to_amdgpu_fence(fence)->start_timestamp = timestamp;
+}
+
+/**
* amdgpu_fence_driver_start_ring - make the fence driver
* ready for use on the requested ring.
*
@@ -319,30 +441,28 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
*/
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
- unsigned irq_type)
+ unsigned int irq_type)
{
struct amdgpu_device *adev = ring->adev;
uint64_t index;
- if (ring != &adev->uvd.ring) {
- ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
- ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
+ if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
+ ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
+ ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
} else {
/* put fence directly behind firmware */
index = ALIGN(adev->uvd.fw->size, 8);
- ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index;
- ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index;
+ ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
+ ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
}
amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
- amdgpu_irq_get(adev, irq_src, irq_type);
ring->fence_drv.irq_src = irq_src;
ring->fence_drv.irq_type = irq_type;
ring->fence_drv.initialized = true;
- dev_info(adev->dev, "fence driver on ring %d use gpu addr 0x%016llx, "
- "cpu addr 0x%p\n", ring->idx,
- ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
+ DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
+ ring->name, ring->fence_drv.gpu_addr);
return 0;
}
@@ -351,19 +471,18 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
* for the requested ring.
*
* @ring: ring to init the fence driver on
- * @num_hw_submission: number of entries on the hardware queue
*
* Init the fence driver for the requested ring (all asics).
* Helper function for amdgpu_fence_driver_init().
*/
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
- unsigned num_hw_submission)
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
- long timeout;
- int r;
+ struct amdgpu_device *adev = ring->adev;
- /* Check that num_hw_submission is a power of two */
- if ((num_hw_submission & (num_hw_submission - 1)) != 0)
+ if (!adev)
+ return -EINVAL;
+
+ if (!is_power_of_2(ring->num_hw_submission))
return -EINVAL;
ring->fence_drv.cpu_addr = NULL;
@@ -372,44 +491,21 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
atomic_set(&ring->fence_drv.last_seq, 0);
ring->fence_drv.initialized = false;
- setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
- (unsigned long)ring);
+ timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
- ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
+ ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
spin_lock_init(&ring->fence_drv.lock);
- ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
+ ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
GFP_KERNEL);
+
if (!ring->fence_drv.fences)
return -ENOMEM;
- /* No need to setup the GPU scheduler for KIQ ring */
- if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
- timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
- if (timeout == 0) {
- /*
- * FIXME:
- * Delayed workqueue cannot use it directly,
- * so the scheduler will not use delayed workqueue if
- * MAX_SCHEDULE_TIMEOUT is set.
- * Currently keep it simple and silly.
- */
- timeout = MAX_SCHEDULE_TIMEOUT;
- }
- r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
- num_hw_submission,
- timeout, ring->name);
- if (r) {
- DRM_ERROR("Failed to create scheduler on ring %s.\n",
- ring->name);
- return r;
- }
- }
-
return 0;
}
/**
- * amdgpu_fence_driver_init - init the fence driver
+ * amdgpu_fence_driver_sw_init - init the fence driver
* for all possible rings.
*
* @adev: amdgpu device pointer
@@ -420,124 +516,303 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
* amdgpu_fence_driver_start_ring().
* Returns 0 for success.
*/
-int amdgpu_fence_driver_init(struct amdgpu_device *adev)
+int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
{
- if (amdgpu_debugfs_fence_init(adev))
- dev_err(adev->dev, "fence debugfs file creation failed\n");
-
return 0;
}
/**
- * amdgpu_fence_driver_fini - tear down the fence driver
+ * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
+ * fence driver interrupts need to be restored.
+ *
+ * @ring: ring to be checked
+ *
+ * Interrupts for rings that belong to GFX IP don't need to be restored
+ * when the target power state is s0ix.
+ *
+ * Return true if the interrupts need to be restored, false otherwise.
+ */
+static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool is_gfx_power_domain = false;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_SDMA:
+ /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
+ if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
+ IP_VERSION(5, 0, 0))
+ is_gfx_power_domain = true;
+ break;
+ case AMDGPU_RING_TYPE_GFX:
+ case AMDGPU_RING_TYPE_COMPUTE:
+ case AMDGPU_RING_TYPE_KIQ:
+ case AMDGPU_RING_TYPE_MES:
+ is_gfx_power_domain = true;
+ break;
+ default:
+ break;
+ }
+
+ return !(adev->in_s0ix && is_gfx_power_domain);
+}
+
+/**
+ * amdgpu_fence_driver_hw_fini - tear down the fence driver
* for all possible rings.
*
* @adev: amdgpu device pointer
*
* Tear down the fence driver for all possible rings (all asics).
*/
-void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
+void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
{
- unsigned i, j;
- int r;
+ int i, r;
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !ring->fence_drv.initialized)
continue;
- r = amdgpu_fence_wait_empty(ring);
- if (r) {
- /* no need to trigger GPU reset as we are unloading */
- amdgpu_fence_driver_force_completion(adev);
- }
- amdgpu_irq_put(adev, ring->fence_drv.irq_src,
- ring->fence_drv.irq_type);
- amd_sched_fini(&ring->sched);
- del_timer_sync(&ring->fence_drv.fallback_timer);
- for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
- dma_fence_put(ring->fence_drv.fences[j]);
- kfree(ring->fence_drv.fences);
- ring->fence_drv.fences = NULL;
- ring->fence_drv.initialized = false;
+
+ /* You can't wait for HW to signal if it's gone */
+ if (!drm_dev_is_unplugged(adev_to_drm(adev)))
+ r = amdgpu_fence_wait_empty(ring);
+ else
+ r = -ENODEV;
+ /* no need to trigger GPU reset as we are unloading */
+ if (r)
+ amdgpu_fence_driver_force_completion(ring);
+
+ if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
+ ring->fence_drv.irq_src &&
+ amdgpu_fence_need_ring_interrupt_restore(ring))
+ amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
+
+ timer_delete_sync(&ring->fence_drv.fallback_timer);
}
}
-/**
- * amdgpu_fence_driver_suspend - suspend the fence driver
- * for all possible rings.
- *
- * @adev: amdgpu device pointer
- *
- * Suspend the fence driver for all possible rings (all asics).
- */
-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
+/* Will either stop and flush handlers for the amdgpu interrupt or re-enable it */
+void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop)
{
- int i, r;
+ int i;
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
+ continue;
+
+ if (stop)
+ disable_irq(adev->irq.irq);
+ else
+ enable_irq(adev->irq.irq);
+ }
+}
+
+void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
if (!ring || !ring->fence_drv.initialized)
continue;
- /* wait for gpu to finish processing current batch */
- r = amdgpu_fence_wait_empty(ring);
- if (r) {
- /* delay GPU reset to resume */
- amdgpu_fence_driver_force_completion(adev);
- }
+ /*
+ * Notice we check for sched.ops since there's some
+ * override on the meaning of sched.ready by amdgpu.
+ * The natural check would be sched.ready, which is
+ * set as drm_sched_init() finishes...
+ */
+ if (ring->sched.ops)
+ drm_sched_fini(&ring->sched);
- /* disable the interrupt */
- amdgpu_irq_put(adev, ring->fence_drv.irq_src,
- ring->fence_drv.irq_type);
+ for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
+ dma_fence_put(ring->fence_drv.fences[j]);
+ kfree(ring->fence_drv.fences);
+ ring->fence_drv.fences = NULL;
+ ring->fence_drv.initialized = false;
}
}
/**
- * amdgpu_fence_driver_resume - resume the fence driver
+ * amdgpu_fence_driver_hw_init - enable the fence driver
* for all possible rings.
*
* @adev: amdgpu device pointer
*
- * Resume the fence driver for all possible rings (all asics).
+ * Enable the fence driver for all possible rings (all asics).
* Not all asics have all rings, so each asic will only
* start the fence driver on the rings it has using
* amdgpu_fence_driver_start_ring().
* Returns 0 for success.
*/
-void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
+void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
{
int i;
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
+
if (!ring || !ring->fence_drv.initialized)
continue;
/* enable the interrupt */
- amdgpu_irq_get(adev, ring->fence_drv.irq_src,
- ring->fence_drv.irq_type);
+ if (ring->fence_drv.irq_src &&
+ amdgpu_fence_need_ring_interrupt_restore(ring))
+ amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
}
}
/**
- * amdgpu_fence_driver_force_completion - force all fence waiter to complete
+ * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
*
- * @adev: amdgpu device pointer
+ * @ring: ring whose job-embedded fences are to be cleared
*
- * In case of GPU reset failure make sure no process keep waiting on fence
- * that will never complete.
*/
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev)
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
{
int i;
+ struct dma_fence *old, **ptr;
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (!ring || !ring->fence_drv.initialized)
- continue;
+ for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
+ ptr = &ring->fence_drv.fences[i];
+ old = rcu_dereference_protected(*ptr, 1);
+ if (old && old->ops == &amdgpu_job_fence_ops) {
+ struct amdgpu_job *job;
+
+ /* For a non-scheduler bad job, i.e. a failed IB test, we need to signal
+ * it right here or we won't be able to track it in fence_drv
+ * and it will remain unsignaled during sa_bo free.
+ */
+ job = container_of(old, struct amdgpu_job, hw_fence.base);
+ if (!job->base.s_fence && !dma_fence_is_signaled(old))
+ dma_fence_signal(old);
+ RCU_INIT_POINTER(*ptr, NULL);
+ dma_fence_put(old);
+ }
+ }
+}
+
+/**
+ * amdgpu_fence_driver_set_error - set error code on fences
+ * @ring: the ring which contains the fences
+ * @error: the error code to set
+ *
+ * Set an error code to all the fences pending on the ring.
+ */
+void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error)
+{
+ struct amdgpu_fence_driver *drv = &ring->fence_drv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv->lock, flags);
+ for (unsigned int i = 0; i <= drv->num_fences_mask; ++i) {
+ struct dma_fence *fence;
+
+ fence = rcu_dereference_protected(drv->fences[i],
+ lockdep_is_held(&drv->lock));
+ if (fence && !dma_fence_is_signaled_locked(fence))
+ dma_fence_set_error(fence, error);
+ }
+ spin_unlock_irqrestore(&drv->lock, flags);
+}
+
+/**
+ * amdgpu_fence_driver_force_completion - force signal the latest fence of a ring
+ *
+ * @ring: ring whose latest fence should be force-signaled
+ *
+ */
+void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
+{
+ amdgpu_fence_driver_set_error(ring, -ECANCELED);
+ amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
+ amdgpu_fence_process(ring);
+}
+
+
+/*
+ * Kernel queue reset handling
+ *
+ * The driver can reset individual queues for most engines, but those queues
+ * may contain work from multiple contexts. Resetting the queue will
+ * lose all of that state. In order to minimize the collateral damage, the
+ * driver will save the ring contents which are not associated with the guilty
+ * context prior to resetting the queue. After resetting the queue the queue
+ * contents from the other contexts are re-emitted to the ring so that they can
+ * be processed by the engine. To handle this, we save the queue's write
+ * pointer (wptr) in the fences associated with each context. If we get a
+ * queue timeout, we can then use the wptrs from the fences to determine
+ * which data needs to be saved out of the queue's ring buffer.
+ */
+
+/**
+ * amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence
+ *
+ * @fence: the guilty fence to force-signal on its ring
+ *
+ */
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence)
+{
+ dma_fence_set_error(&fence->base, -ETIME);
+ amdgpu_fence_write(fence->ring, fence->seq);
+ amdgpu_fence_process(fence->ring);
+}
+
+void amdgpu_fence_save_wptr(struct dma_fence *fence)
+{
+ struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base);
+
+ am_fence->wptr = am_fence->ring->wptr;
+}
+
+static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring,
+ u64 start_wptr, u32 end_wptr)
+{
+ unsigned int first_idx = start_wptr & ring->buf_mask;
+ unsigned int last_idx = end_wptr & ring->buf_mask;
+ unsigned int i;
+
+ /* Backup the contents of the ring buffer. */
+ for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask)
+ ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i];
+}
+
+void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ struct dma_fence *unprocessed;
+ struct dma_fence __rcu **ptr;
+ struct amdgpu_fence *fence;
+ u64 wptr, i, seqno;
+
+ seqno = amdgpu_fence_read(ring);
+ wptr = ring->fence_drv.signalled_wptr;
+ ring->ring_backup_entries_to_copy = 0;
+
+ for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) {
+ ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask];
+ rcu_read_lock();
+ unprocessed = rcu_dereference(*ptr);
+
+ if (unprocessed && !dma_fence_is_signaled(unprocessed)) {
+ fence = container_of(unprocessed, struct amdgpu_fence, base);
- amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
+ /* save everything if the ring is not guilty, otherwise
+ * just save the content from other contexts.
+ */
+ if (!guilty_fence || (fence->context != guilty_fence->context))
+ amdgpu_ring_backup_unprocessed_command(ring, wptr,
+ fence->wptr);
+ wptr = fence->wptr;
+ }
+ rcu_read_unlock();
}
}
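A hedged sketch of how a per-queue reset path might combine the helpers above; hw_reset_queue() and reemit_backup() are hypothetical placeholders for the engine-specific steps and are not defined in this patch:

	static int example_queue_reset(struct amdgpu_ring *ring,
				       struct amdgpu_fence *guilty_fence)
	{
		int r;

		/* Save unprocessed ring contents from the innocent contexts. */
		amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);

		r = hw_reset_queue(ring);	/* engine specific, hypothetical */
		if (r)
			return r;

		/* Signal the guilty fence with an error so its waiters can proceed. */
		if (guilty_fence)
			amdgpu_fence_driver_guilty_force_completion(guilty_fence);

		/* Re-emit the saved contents so the other contexts make progress. */
		return reemit_backup(ring);	/* engine specific, hypothetical */
	}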
@@ -552,13 +827,19 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
- struct amdgpu_fence *fence = to_amdgpu_fence(f);
- return (const char *)fence->ring->name;
+ return (const char *)to_amdgpu_fence(f)->ring->name;
+}
+
+static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
+
+ return (const char *)to_amdgpu_ring(job->base.sched)->name;
}
/**
* amdgpu_fence_enable_signaling - enable signalling on fence
- * @fence: fence
+ * @f: fence
*
* This function is called with fence_queue lock held, and adds a callback
* to fence_queue that checks if this fence is signaled, and if so it
@@ -566,13 +847,25 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
*/
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
- struct amdgpu_fence *fence = to_amdgpu_fence(f);
- struct amdgpu_ring *ring = fence->ring;
+ if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
+ amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
- if (!timer_pending(&ring->fence_drv.fallback_timer))
- amdgpu_fence_schedule_fallback(ring);
+ return true;
+}
- DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+/**
+ * amdgpu_job_fence_enable_signaling - enable signalling on job fence
+ * @f: fence
+ *
+ * This is similar to amdgpu_fence_enable_signaling above, but it
+ * only handles the job-embedded fence.
+ */
+static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f)
+{
+ struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base);
+
+ if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer))
+ amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched));
return true;
}
@@ -587,14 +880,30 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
static void amdgpu_fence_free(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
- struct amdgpu_fence *fence = to_amdgpu_fence(f);
- kmem_cache_free(amdgpu_fence_slab, fence);
+
+ /* free the fence if it is a standalone (non-job) fence */
+ kfree(to_amdgpu_fence(f));
+}
+
+/**
+ * amdgpu_job_fence_free - free up the job with embedded fence
+ *
+ * @rcu: RCU callback head
+ *
+ * Free up the job with embedded fence after the RCU grace period.
+ */
+static void amdgpu_job_fence_free(struct rcu_head *rcu)
+{
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+
+ /* free the job that embeds this fence */
+ kfree(container_of(f, struct amdgpu_job, hw_fence.base));
}
/**
* amdgpu_fence_release - callback that fence can be freed
*
- * @fence: fence
+ * @f: fence
*
* This function is called when the reference count becomes zero.
* It just RCU schedules freeing up the fence.
@@ -604,70 +913,147 @@ static void amdgpu_fence_release(struct dma_fence *f)
call_rcu(&f->rcu, amdgpu_fence_free);
}
+/**
+ * amdgpu_job_fence_release - callback that job embedded fence can be freed
+ *
+ * @f: fence
+ *
+ * This is similar to amdgpu_fence_release above, but it
+ * only handles the job-embedded fence.
+ */
+static void amdgpu_job_fence_release(struct dma_fence *f)
+{
+ call_rcu(&f->rcu, amdgpu_job_fence_free);
+}
+
static const struct dma_fence_ops amdgpu_fence_ops = {
.get_driver_name = amdgpu_fence_get_driver_name,
.get_timeline_name = amdgpu_fence_get_timeline_name,
.enable_signaling = amdgpu_fence_enable_signaling,
- .wait = dma_fence_default_wait,
.release = amdgpu_fence_release,
};
+static const struct dma_fence_ops amdgpu_job_fence_ops = {
+ .get_driver_name = amdgpu_fence_get_driver_name,
+ .get_timeline_name = amdgpu_job_fence_get_timeline_name,
+ .enable_signaling = amdgpu_job_fence_enable_signaling,
+ .release = amdgpu_job_fence_release,
+};
+
/*
* Fence debugfs
*/
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
+static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = m->private;
int i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
+
if (!ring || !ring->fence_drv.initialized)
continue;
amdgpu_fence_process(ring);
seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
- seq_printf(m, "Last signaled fence 0x%08x\n",
+ seq_printf(m, "Last signaled fence 0x%08x\n",
atomic_read(&ring->fence_drv.last_seq));
- seq_printf(m, "Last emitted 0x%08x\n",
+ seq_printf(m, "Last emitted 0x%08x\n",
ring->fence_drv.sync_seq);
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
+ ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
+ seq_printf(m, "Last signaled trailing fence 0x%08x\n",
+ le32_to_cpu(*ring->trail_fence_cpu_addr));
+ seq_printf(m, "Last emitted 0x%08x\n",
+ ring->trail_seq);
+ }
+
+ if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
+ continue;
+
+ /* set in CP_VMID_PREEMPT and preemption occurred */
+ seq_printf(m, "Last preempted 0x%08x\n",
+ le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
+ /* set in CP_VMID_RESET and reset occurred */
+ seq_printf(m, "Last reset 0x%08x\n",
+ le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
+ /* Both preemption and reset occurred */
+ seq_printf(m, "Last both 0x%08x\n",
+ le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
}
return 0;
}
-/**
- * amdgpu_debugfs_gpu_reset - manually trigger a gpu reset
+/*
+ * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
*
* Manually trigger a gpu reset at the next fence wait.
*/
-static int amdgpu_debugfs_gpu_reset(struct seq_file *m, void *data)
+static int gpu_recover_get(void *data, u64 *val)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ struct drm_device *dev = adev_to_drm(adev);
+ int r;
+
+ r = pm_runtime_get_sync(dev->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
+ return 0;
+ }
+
+ if (amdgpu_reset_domain_schedule(adev->reset_domain, &adev->reset_work))
+ flush_work(&adev->reset_work);
+
+ *val = atomic_read(&adev->reset_domain->reset_res);
- seq_printf(m, "gpu reset\n");
- amdgpu_gpu_reset(adev);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return 0;
}
-static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
- {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
- {"amdgpu_gpu_reset", &amdgpu_debugfs_gpu_reset, 0, NULL}
-};
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
+ "%lld\n");
+
+static void amdgpu_debugfs_reset_work(struct work_struct *work)
+{
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+ reset_work);
+
+ struct amdgpu_reset_context reset_context;
+
+ memset(&reset_context, 0, sizeof(reset_context));
+
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ reset_context.src = AMDGPU_RESET_SRC_USER;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+
+ amdgpu_device_gpu_recover(adev, NULL, &reset_context);
+}
+
#endif
-int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
+void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
-#else
- return 0;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
+ &amdgpu_debugfs_fence_info_fops);
+
+ if (!amdgpu_sriov_vf(adev)) {
+
+ INIT_WORK(&adev->reset_work, amdgpu_debugfs_reset_work);
+ debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
+ &amdgpu_debugfs_gpu_recover_fops);
+ }
#endif
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
new file mode 100644
index 000000000000..b0082aa7f3c6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -0,0 +1,408 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_i2c.h"
+#include "smu_v11_0_i2c.h"
+#include "atom.h"
+#include "amdgpu_fru_eeprom.h"
+#include "amdgpu_eeprom.h"
+
+#define FRU_EEPROM_MADDR_6 0x60000
+#define FRU_EEPROM_MADDR_8 0x80000
+#define FRU_EEPROM_MADDR_INV 0xFFFFF
+
+static bool is_fru_eeprom_supported(struct amdgpu_device *adev, u32 *fru_addr)
+{
+ /* Only server cards have the FRU EEPROM
+ * TODO: See if we can figure this out dynamically instead of
+ * having to parse VBIOS versions.
+ */
+ struct atom_context *atom_ctx = adev->mode_info.atom_context;
+
+ /* The i2c access is blocked on VF
+ * TODO: Need other way to get the info
+ * Also, FRU not valid for APU devices.
+ */
+ if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
+ return false;
+
+ /* The default I2C EEPROM address of the FRU.
+ */
+ if (fru_addr)
+ *fru_addr = FRU_EEPROM_MADDR_8;
+
+ /* VBIOS is of the format ###-DXXXYYYY-##. For SKU identification,
+ * we can use just the "DXXX" portion. If there were more models, we
+ * could convert the 3 characters to a hex integer and use a switch
+ * for ease/speed/readability. For now, 2 string comparisons are
+ * reasonable and not too expensive
+ */
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(11, 0, 2):
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ /* D161 and D163 are the VG20 server SKUs */
+ if (atom_ctx && (strnstr(atom_ctx->vbios_pn, "D161",
+ sizeof(atom_ctx->vbios_pn)) ||
+ strnstr(atom_ctx->vbios_pn, "D163",
+ sizeof(atom_ctx->vbios_pn)))) {
+ if (fru_addr)
+ *fru_addr = FRU_EEPROM_MADDR_6;
+ return true;
+ } else {
+ return false;
+ }
+ case CHIP_ARCTURUS:
+ default:
+ return false;
+ }
+ case IP_VERSION(11, 0, 7):
+ if (atom_ctx && strnstr(atom_ctx->vbios_pn, "D603",
+ sizeof(atom_ctx->vbios_pn))) {
+ if (strnstr(atom_ctx->vbios_pn, "D603GLXE",
+ sizeof(atom_ctx->vbios_pn))) {
+ return false;
+ }
+
+ if (fru_addr)
+ *fru_addr = FRU_EEPROM_MADDR_6;
+ return true;
+
+ } else {
+ return false;
+ }
+ case IP_VERSION(13, 0, 2):
+ /* All Aldebaran SKUs have an FRU */
+ if (atom_ctx && !strnstr(atom_ctx->vbios_pn, "D673",
+ sizeof(atom_ctx->vbios_pn)))
+ if (fru_addr)
+ *fru_addr = FRU_EEPROM_MADDR_6;
+ return true;
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ if (fru_addr)
+ *fru_addr = FRU_EEPROM_MADDR_8;
+ return true;
+ case IP_VERSION(13, 0, 12):
+ if (fru_addr)
+ *fru_addr = FRU_EEPROM_MADDR_INV;
+ return true;
+ default:
+ return false;
+ }
+}
+
+int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_fru_info *fru_info;
+ unsigned char buf[8], *pia;
+ u32 addr, fru_addr;
+ int size, len;
+ u8 csum;
+
+ if (!is_fru_eeprom_supported(adev, &fru_addr))
+ return 0;
+
+ /* FRU data available, but no direct EEPROM access */
+ if (fru_addr == FRU_EEPROM_MADDR_INV)
+ return 0;
+
+ if (!adev->fru_info) {
+ adev->fru_info = kzalloc(sizeof(*adev->fru_info), GFP_KERNEL);
+ if (!adev->fru_info)
+ return -ENOMEM;
+ }
+
+ fru_info = adev->fru_info;
+ /* For Arcturus-and-later, default value of serial_number is unique_id
+ * so convert it to a 16-digit HEX string for convenience and
+ * backwards-compatibility.
+ */
+ sprintf(fru_info->serial, "%llx", adev->unique_id);
+
+ /* If algo exists, it means that the i2c_adapter's initialized */
+ if (!adev->pm.fru_eeprom_i2c_bus || !adev->pm.fru_eeprom_i2c_bus->algo) {
+ dev_warn(adev->dev,
+ "Cannot access FRU, EEPROM accessor not initialized");
+ return -ENODEV;
+ }
+
+ /* Read the IPMI Common header */
+ len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, fru_addr, buf,
+ sizeof(buf));
+ if (len != 8) {
+ dev_err(adev->dev, "Couldn't read the IPMI Common Header: %d",
+ len);
+ return len < 0 ? len : -EIO;
+ }
+
+ if (buf[0] != 1) {
+ dev_err(adev->dev, "Bad IPMI Common Header version: 0x%02x",
+ buf[0]);
+ return -EIO;
+ }
+
+ for (csum = 0; len > 0; len--)
+ csum += buf[len - 1];
+ if (csum) {
+ dev_err(adev->dev, "Bad IPMI Common Header checksum: 0x%02x",
+ csum);
+ return -EIO;
+ }
+
+ /* Get the offset to the Product Info Area (PIA). */
+ addr = buf[4] * 8;
+ if (!addr)
+ return 0;
+
+ /* Get the absolute address to the PIA. */
+ addr += fru_addr;
+
+ /* Read the header of the PIA. */
+ len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, buf, 3);
+ if (len != 3) {
+ dev_err(adev->dev,
+ "Couldn't read the Product Info Area header: %d", len);
+ return len < 0 ? len : -EIO;
+ }
+
+ if (buf[0] != 1) {
+ dev_err(adev->dev, "Bad IPMI Product Info Area version: 0x%02x",
+ buf[0]);
+ return -EIO;
+ }
+
+ size = buf[1] * 8;
+ pia = kzalloc(size, GFP_KERNEL);
+ if (!pia)
+ return -ENOMEM;
+
+ /* Read the whole PIA. */
+ len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, pia, size);
+ if (len != size) {
+ kfree(pia);
+ dev_err(adev->dev, "Couldn't read the Product Info Area: %d",
+ len);
+ return len < 0 ? len : -EIO;
+ }
+
+ for (csum = 0; size > 0; size--)
+ csum += pia[size - 1];
+ if (csum) {
+ dev_err(adev->dev, "Bad Product Info Area checksum: 0x%02x",
+ csum);
+ kfree(pia);
+ return -EIO;
+ }
+
+ /* Now extract useful information from the PIA.
+ *
+ * The Manufacturer Name field starts at offset 3; the low 6 bits of its
+ * leading type/length byte give the field length.
+ */
+ addr = 3;
+ if (addr + 1 >= len)
+ goto Out;
+ memcpy(fru_info->manufacturer_name, pia + addr + 1,
+ min_t(size_t, sizeof(fru_info->manufacturer_name),
+ pia[addr] & 0x3F));
+ fru_info->manufacturer_name[sizeof(fru_info->manufacturer_name) - 1] =
+ '\0';
+
+ /* Read Product Name field. */
+ addr += 1 + (pia[addr] & 0x3F);
+ if (addr + 1 >= len)
+ goto Out;
+ memcpy(fru_info->product_name, pia + addr + 1,
+ min_t(size_t, sizeof(fru_info->product_name), pia[addr] & 0x3F));
+ fru_info->product_name[sizeof(fru_info->product_name) - 1] = '\0';
+
+ /* Go to the Product Part/Model Number field. */
+ addr += 1 + (pia[addr] & 0x3F);
+ if (addr + 1 >= len)
+ goto Out;
+ memcpy(fru_info->product_number, pia + addr + 1,
+ min_t(size_t, sizeof(fru_info->product_number),
+ pia[addr] & 0x3F));
+ fru_info->product_number[sizeof(fru_info->product_number) - 1] = '\0';
+
+ /* Go to the Product Version field. */
+ addr += 1 + (pia[addr] & 0x3F);
+
+ /* Go to the Product Serial Number field. */
+ addr += 1 + (pia[addr] & 0x3F);
+ if (addr + 1 >= len)
+ goto Out;
+ memcpy(fru_info->serial, pia + addr + 1,
+ min_t(size_t, sizeof(fru_info->serial), pia[addr] & 0x3F));
+ fru_info->serial[sizeof(fru_info->serial) - 1] = '\0';
+
+ /* Asset Tag field */
+ addr += 1 + (pia[addr] & 0x3F);
+
+ /* FRU File Id field. This could be 'null'. */
+ addr += 1 + (pia[addr] & 0x3F);
+ if ((addr + 1 >= len) || !(pia[addr] & 0x3F))
+ goto Out;
+ memcpy(fru_info->fru_id, pia + addr + 1,
+ min_t(size_t, sizeof(fru_info->fru_id), pia[addr] & 0x3F));
+ fru_info->fru_id[sizeof(fru_info->fru_id) - 1] = '\0';
+
+Out:
+ kfree(pia);
+ return 0;
+}
+
+/**
+ * DOC: product_name
+ *
+ * The amdgpu driver provides a sysfs API for reporting the product name
+ * for the device.
+ * The file product_name is used for this and returns the product name
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_fru_product_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ return sysfs_emit(buf, "%s\n", adev->fru_info->product_name);
+}
+
+static DEVICE_ATTR(product_name, 0444, amdgpu_fru_product_name_show, NULL);
+
+/**
+ * DOC: product_number
+ *
+ * The amdgpu driver provides a sysfs API for reporting the part number
+ * for the device.
+ * The file product_number is used for this and returns the part number
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_fru_product_number_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ return sysfs_emit(buf, "%s\n", adev->fru_info->product_number);
+}
+
+static DEVICE_ATTR(product_number, 0444, amdgpu_fru_product_number_show, NULL);
+
+/**
+ * DOC: serial_number
+ *
+ * The amdgpu driver provides a sysfs API for reporting the serial number
+ * for the device.
+ * The file serial_number is used for this and returns the serial number
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_fru_serial_number_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ return sysfs_emit(buf, "%s\n", adev->fru_info->serial);
+}
+
+static DEVICE_ATTR(serial_number, 0444, amdgpu_fru_serial_number_show, NULL);
+
+/**
+ * DOC: fru_id
+ *
+ * The amdgpu driver provides a sysfs API for reporting FRU File Id
+ * for the device.
+ * The file fru_id is used for this and returns the File Id value
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_fru_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ return sysfs_emit(buf, "%s\n", adev->fru_info->fru_id);
+}
+
+static DEVICE_ATTR(fru_id, 0444, amdgpu_fru_id_show, NULL);
+
+/**
+ * DOC: manufacturer
+ *
+ * The amdgpu driver provides a sysfs API for reporting manufacturer name from
+ * FRU information.
+ * The file manufacturer returns the value as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_fru_manufacturer_name_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ return sysfs_emit(buf, "%s\n", adev->fru_info->manufacturer_name);
+}
+
+static DEVICE_ATTR(manufacturer, 0444, amdgpu_fru_manufacturer_name_show, NULL);
+
+static const struct attribute *amdgpu_fru_attributes[] = {
+ &dev_attr_product_name.attr,
+ &dev_attr_product_number.attr,
+ &dev_attr_serial_number.attr,
+ &dev_attr_fru_id.attr,
+ &dev_attr_manufacturer.attr,
+ NULL
+};
+
+int amdgpu_fru_sysfs_init(struct amdgpu_device *adev)
+{
+ if (!is_fru_eeprom_supported(adev, NULL) || !adev->fru_info)
+ return 0;
+
+ return sysfs_create_files(&adev->dev->kobj, amdgpu_fru_attributes);
+}
+
+void amdgpu_fru_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (!adev->fru_info)
+ return;
+
+ sysfs_remove_files(&adev->dev->kobj, amdgpu_fru_attributes);
+}
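The attributes registered above are plain read-only sysfs files; a minimal userspace sketch, assuming the device is card0 (the card index and the presence of the files depend on the system):

/* Userspace sketch, not kernel code. */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/class/drm/card0/device/serial_number", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("FRU serial: %s", buf);
	fclose(f);
	return 0;
}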
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
new file mode 100644
index 000000000000..98f3196599ef
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_FRU_EEPROM_H__
+#define __AMDGPU_FRU_EEPROM_H__
+
+#define AMDGPU_PRODUCT_NAME_LEN 64
+
+/* FRU product information */
+struct amdgpu_fru_info {
+ char product_number[20];
+ char product_name[AMDGPU_PRODUCT_NAME_LEN];
+ char serial[20];
+ char manufacturer_name[32];
+ char fru_id[50];
+};
+
+int amdgpu_fru_get_product_info(struct amdgpu_device *adev);
+int amdgpu_fru_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_fru_sysfs_fini(struct amdgpu_device *adev);
+
+#endif // __AMDGPU_FRU_EEPROM_H__
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
new file mode 100644
index 000000000000..328a1b963548
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/firmware.h>
+#include <linux/dma-mapping.h>
+
+#include "amdgpu.h"
+#include "amdgpu_fw_attestation.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_ucode.h"
+#include "soc15_common.h"
+
+#define FW_ATTESTATION_DB_COOKIE 0x143b6a37
+#define FW_ATTESTATION_RECORD_VALID 1
+#define FW_ATTESTATION_MAX_SIZE 4096
+
+struct FW_ATT_DB_HEADER {
+ uint32_t AttDbVersion; /* version of the fwar feature */
+ uint32_t AttDbCookie; /* cookie as an extra check for corrupt data */
+};
+
+struct FW_ATT_RECORD {
+ uint16_t AttFwIdV1; /* Legacy FW Type field */
+ uint16_t AttFwIdV2; /* V2 FW ID field */
+ uint32_t AttFWVersion; /* FW Version */
+ uint16_t AttFWActiveFunctionID; /* The VF ID (only in VF Attestation Table) */
+ uint8_t AttSource; /* FW source indicator */
+ uint8_t RecordValid; /* Indicates whether the record is a valid entry */
+ uint32_t AttFwTaId; /* Ta ID (only in TA Attestation Table) */
+};
+
+static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
+ char __user *buf,
+ size_t size,
+ loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ uint64_t records_addr = 0;
+ uint64_t vram_pos = 0;
+ struct FW_ATT_DB_HEADER fw_att_hdr = {0};
+ struct FW_ATT_RECORD fw_att_record = {0};
+
+ if (size < sizeof(struct FW_ATT_RECORD)) {
+ DRM_WARN("FW attestation input buffer not enough memory");
+ return -EINVAL;
+ }
+
+ if ((*pos + sizeof(struct FW_ATT_DB_HEADER)) >= FW_ATTESTATION_MAX_SIZE) {
+ DRM_WARN("FW attestation out of bounds");
+ return 0;
+ }
+
+ if (psp_get_fw_attestation_records_addr(&adev->psp, &records_addr)) {
+ DRM_WARN("Failed to get FW attestation record address");
+ return -EINVAL;
+ }
+
+ vram_pos = records_addr - adev->gmc.vram_start;
+
+ if (*pos == 0) {
+ amdgpu_device_vram_access(adev,
+ vram_pos,
+ (uint32_t *)&fw_att_hdr,
+ sizeof(struct FW_ATT_DB_HEADER),
+ false);
+
+ if (fw_att_hdr.AttDbCookie != FW_ATTESTATION_DB_COOKIE) {
+ DRM_WARN("Invalid FW attestation cookie");
+ return -EINVAL;
+ }
+
+ DRM_INFO("FW attestation version = 0x%X", fw_att_hdr.AttDbVersion);
+ }
+
+ amdgpu_device_vram_access(adev,
+ vram_pos + sizeof(struct FW_ATT_DB_HEADER) + *pos,
+ (uint32_t *)&fw_att_record,
+ sizeof(struct FW_ATT_RECORD),
+ false);
+
+ if (fw_att_record.RecordValid != FW_ATTESTATION_RECORD_VALID)
+ return 0;
+
+ if (copy_to_user(buf, (void *)&fw_att_record, sizeof(struct FW_ATT_RECORD)))
+ return -EINVAL;
+
+ *pos += sizeof(struct FW_ATT_RECORD);
+
+ return sizeof(struct FW_ATT_RECORD);
+}
+
+static const struct file_operations amdgpu_fw_attestation_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_fw_attestation_debugfs_read,
+ .write = NULL,
+ .llseek = default_llseek
+};
+
+static int amdgpu_is_fw_attestation_supported(struct amdgpu_device *adev)
+{
+ if (adev->flags & AMD_IS_APU)
+ return 0;
+
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 2) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(14, 0, 3))
+ return 0;
+
+ if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ return 1;
+
+ return 0;
+}
+
+void amdgpu_fw_attestation_debugfs_init(struct amdgpu_device *adev)
+{
+ if (!amdgpu_is_fw_attestation_supported(adev))
+ return;
+
+ debugfs_create_file("amdgpu_fw_attestation",
+ 0400,
+ adev_to_drm(adev)->primary->debugfs_root,
+ adev,
+ &amdgpu_fw_attestation_debugfs_ops);
+}
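The debugfs file registered above hands back one attestation record per read() until an invalid record is reached; a hedged userspace sketch, assuming DRI minor 0 and root access (the local struct mirrors FW_ATT_RECORD and its layout stability is an assumption):

/* Userspace sketch, not kernel code. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct fw_att_record {
	uint16_t fw_id_v1;
	uint16_t fw_id_v2;
	uint32_t fw_version;
	uint16_t function_id;
	uint8_t  source;
	uint8_t  valid;
	uint32_t ta_id;
};

int main(void)
{
	struct fw_att_record rec;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_fw_attestation", O_RDONLY);

	if (fd < 0)
		return 1;
	while (read(fd, &rec, sizeof(rec)) == (ssize_t)sizeof(rec))
		printf("fw id 0x%04x version 0x%08x\n", rec.fw_id_v2, rec.fw_version);
	close(fd);
	return 0;
}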
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.h
new file mode 100644
index 000000000000..90af4fe58c99
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _AMDGPU_FW_ATTESTATION_H
+#define _AMDGPU_FW_ATTESTATION_H
+
+#include "amdgpu.h"
+
+void amdgpu_fw_attestation_debugfs_init(struct amdgpu_device *adev);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 902e6015abca..b2033f8352f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -25,12 +25,18 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+
#include <drm/amdgpu_drm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"
+#include "amdgpu_reset.h"
+#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_tt.h>
/*
* GART
@@ -55,144 +61,213 @@
/*
* Common GART table functions.
*/
+
/**
- * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
+ * amdgpu_gart_dummy_page_init - init dummy page used by the driver
*
* @adev: amdgpu_device pointer
*
- * Allocate system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
- * gart table to be in system memory.
- * Returns 0 for success, -ENOMEM for failure.
+ * Allocate the dummy page used by the driver (all asics).
+ * This dummy page is used by the driver as a filler for gart entries
+ * when pages are taken out of the GART.
+ * Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
+static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
- void *ptr;
+ struct page *dummy_page = ttm_glob.dummy_read_page;
- ptr = pci_alloc_consistent(adev->pdev, adev->gart.table_size,
- &adev->gart.table_addr);
- if (ptr == NULL) {
+ if (adev->dummy_page_addr)
+ return 0;
+ adev->dummy_page_addr = dma_map_page_attrs(&adev->pdev->dev, dummy_page, 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) {
+ dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
+ adev->dummy_page_addr = 0;
return -ENOMEM;
}
-#ifdef CONFIG_X86
- if (0) {
- set_memory_uc((unsigned long)ptr,
- adev->gart.table_size >> PAGE_SHIFT);
- }
-#endif
- adev->gart.ptr = ptr;
- memset((void *)adev->gart.ptr, 0, adev->gart.table_size);
return 0;
}
/**
- * amdgpu_gart_table_ram_free - free system ram for gart page table
+ * amdgpu_gart_dummy_page_fini - free dummy page used by the driver
*
* @adev: amdgpu_device pointer
*
- * Free system memory for GART page table
- * (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
- * gart table to be in system memory.
+ * Frees the dummy page used by the driver (all asics).
*/
-void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
+void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
- if (adev->gart.ptr == NULL) {
+ if (!adev->dummy_page_addr)
return;
- }
-#ifdef CONFIG_X86
- if (0) {
- set_memory_wb((unsigned long)adev->gart.ptr,
- adev->gart.table_size >> PAGE_SHIFT);
- }
-#endif
- pci_free_consistent(adev->pdev, adev->gart.table_size,
- (void *)adev->gart.ptr,
- adev->gart.table_addr);
- adev->gart.ptr = NULL;
- adev->gart.table_addr = 0;
+ dma_unmap_page_attrs(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ adev->dummy_page_addr = 0;
}
/**
- * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
+ * amdgpu_gart_table_ram_alloc - allocate system ram for gart page table
*
* @adev: amdgpu_device pointer
*
- * Allocate video memory for GART page table
- * (pcie r4xx, r5xx+). These asics require the
- * gart table to be in video memory.
+ * Allocate system memory for GART page table for ASICs that don't have
+ * dedicated VRAM.
* Returns 0 for success, error for failure.
*/
-int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
+int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
{
- int r;
+ unsigned int order = get_order(adev->gart.table_size);
+ gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+ struct amdgpu_bo *bo = NULL;
+ struct sg_table *sg = NULL;
+ struct amdgpu_bo_param bp;
+ dma_addr_t dma_addr;
+ struct page *p;
+ unsigned long x;
+ int ret;
- if (adev->gart.robj == NULL) {
- r = amdgpu_bo_create(adev, adev->gart.table_size,
- PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->gart.robj);
- if (r) {
- return r;
- }
+ if (adev->gart.bo != NULL)
+ return 0;
+
+ p = alloc_pages(gfp_flags, order);
+ if (!p)
+ return -ENOMEM;
+
+ /* assign pages to this device */
+ for (x = 0; x < (1UL << order); x++)
+ p[x].mapping = adev->mman.bdev.dev_mapping;
+
+ /* If the hardware does not support UTCL2 snooping of the CPU caches
+ * then set_memory_wc() could be used as a workaround to mark the pages
+ * as write combine memory.
+ */
+ dma_addr = dma_map_page(&adev->pdev->dev, p, 0, adev->gart.table_size,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&adev->pdev->dev, dma_addr)) {
+ dev_err(&adev->pdev->dev, "Failed to DMA MAP the GART BO page\n");
+ __free_pages(p, order);
+ p = NULL;
+ return -EFAULT;
+ }
+
+ dev_info(adev->dev, "%s dma_addr:%pad\n", __func__, &dma_addr);
+ /* Create SG table */
+ sg = kmalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ ret = sg_alloc_table(sg, 1, GFP_KERNEL);
+ if (ret)
+ goto error;
+
+ sg_dma_address(sg->sgl) = dma_addr;
+ sg->sgl->length = adev->gart.table_size;
+#ifdef CONFIG_NEED_SG_DMA_LENGTH
+ sg->sgl->dma_length = adev->gart.table_size;
+#endif
+ /* Create SG BO */
+ memset(&bp, 0, sizeof(bp));
+ bp.size = adev->gart.table_size;
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_CPU;
+ bp.type = ttm_bo_type_sg;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+ bp.flags = 0;
+ ret = amdgpu_bo_create(adev, &bp, &bo);
+ if (ret)
+ goto error;
+
+ bo->tbo.sg = sg;
+ bo->tbo.ttm->sg = sg;
+ bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+ bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
+
+ ret = amdgpu_bo_reserve(bo, true);
+ if (ret) {
+ dev_err(adev->dev, "(%d) failed to reserve bo for GART system bo\n", ret);
+ goto error;
}
+
+ ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+ WARN(ret, "Pinning the GART table failed");
+ if (ret)
+ goto error_resv;
+
+ adev->gart.bo = bo;
+ adev->gart.ptr = page_to_virt(p);
+ /* Make GART table accessible in VMID0 */
+ ret = amdgpu_ttm_alloc_gart(&adev->gart.bo->tbo);
+ if (ret)
+ amdgpu_gart_table_ram_free(adev);
+ amdgpu_bo_unreserve(bo);
+
return 0;
+
+error_resv:
+ amdgpu_bo_unreserve(bo);
+error:
+ amdgpu_bo_unref(&bo);
+ if (sg) {
+ sg_free_table(sg);
+ kfree(sg);
+ }
+ __free_pages(p, order);
+ return ret;
}
/**
- * amdgpu_gart_table_vram_pin - pin gart page table in vram
+ * amdgpu_gart_table_ram_free - free gart page table system ram
*
* @adev: amdgpu_device pointer
*
- * Pin the GART page table in vram so it will not be moved
- * by the memory manager (pcie r4xx, r5xx+). These asics require the
- * gart table to be in video memory.
- * Returns 0 for success, error for failure.
+ * Free the system memory used for the GART page table on ASICs that don't
+ * have dedicated VRAM.
*/
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
+void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
{
- uint64_t gpu_addr;
- int r;
+ unsigned int order = get_order(adev->gart.table_size);
+ struct sg_table *sg = adev->gart.bo->tbo.sg;
+ struct page *p;
+ unsigned long x;
+ int ret;
- r = amdgpu_bo_reserve(adev->gart.robj, false);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_pin(adev->gart.robj,
- AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(adev->gart.robj);
- return r;
+ ret = amdgpu_bo_reserve(adev->gart.bo, false);
+ if (!ret) {
+ amdgpu_bo_unpin(adev->gart.bo);
+ amdgpu_bo_unreserve(adev->gart.bo);
}
- r = amdgpu_bo_kmap(adev->gart.robj, &adev->gart.ptr);
- if (r)
- amdgpu_bo_unpin(adev->gart.robj);
- amdgpu_bo_unreserve(adev->gart.robj);
- adev->gart.table_addr = gpu_addr;
- return r;
+ amdgpu_bo_unref(&adev->gart.bo);
+ sg_free_table(sg);
+ kfree(sg);
+ p = virt_to_page(adev->gart.ptr);
+ for (x = 0; x < (1UL << order); x++)
+ p[x].mapping = NULL;
+ __free_pages(p, order);
+
+ adev->gart.ptr = NULL;
}
/**
- * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
+ * amdgpu_gart_table_vram_alloc - allocate vram for gart page table
*
* @adev: amdgpu_device pointer
*
- * Unpin the GART page table in vram (pcie r4xx, r5xx+).
- * These asics require the gart table to be in video memory.
+ * Allocate video memory for GART page table
+ * (pcie r4xx, r5xx+). These asics require the
+ * gart table to be in video memory.
+ * Returns 0 for success, error for failure.
*/
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
+int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
- int r;
+ if (adev->gart.bo != NULL)
+ return 0;
- if (adev->gart.robj == NULL) {
- return;
- }
- r = amdgpu_bo_reserve(adev->gart.robj, true);
- if (likely(r == 0)) {
- amdgpu_bo_kunmap(adev->gart.robj);
- amdgpu_bo_unpin(adev->gart.robj);
- amdgpu_bo_unreserve(adev->gart.robj);
- adev->gart.ptr = NULL;
- }
+ return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo,
+ NULL, (void *)&adev->gart.ptr);
}
/**
@@ -206,10 +281,7 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
*/
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
- if (adev->gart.robj == NULL) {
- return;
- }
- amdgpu_bo_unref(&adev->gart.robj);
+ amdgpu_bo_free_kernel(&adev->gart.bo, NULL, (void *)&adev->gart.ptr);
}
/*
@@ -224,6 +296,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
*
* Unbinds the requested pages from the gart page table and
* replaces them with the dummy page (all asics).
+ * Does nothing if the GART table has not been allocated yet.
*/
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
int pages)
@@ -234,77 +307,113 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
u64 page_base;
/* Starting from VEGA10, system bit must be 0 to mean invalid. */
uint64_t flags = 0;
+ int idx;
- if (!adev->gart.ready) {
- WARN(1, "trying to unbind memory from uninitialized GART !\n");
+ if (!adev->gart.ptr)
+ return;
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
return;
- }
t = offset / AMDGPU_GPU_PAGE_SIZE;
- p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- adev->gart.pages[p] = NULL;
-#endif
- page_base = adev->dummy_page.addr;
+ page_base = adev->dummy_page_addr;
if (!adev->gart.ptr)
continue;
- for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
- amdgpu_gart_set_pte_pde(adev, adev->gart.ptr,
- t, page_base, flags);
+ for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
+ amdgpu_gmc_set_pte_pde(adev, adev->gart.ptr,
+ t, page_base, flags);
page_base += AMDGPU_GPU_PAGE_SIZE;
}
}
- mb();
- amdgpu_gart_flush_gpu_tlb(adev, 0);
+ amdgpu_gart_invalidate_tlb(adev);
+
+ drm_dev_exit(idx);
}
/**
- * amdgpu_gart_bind - bind pages into the gart page table
+ * amdgpu_gart_map - map dma_addresses into GART entries
*
* @adev: amdgpu_device pointer
* @offset: offset into the GPU's gart aperture
* @pages: number of pages to bind
- * @pagelist: pages to bind
* @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
+ * @dst: CPU address of the gart table
*
- * Binds the requested pages to the gart page table
- * (all asics).
+ * Map the dma_addresses into GART entries (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
-int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
- int pages, struct page **pagelist, dma_addr_t *dma_addr,
- uint64_t flags)
+void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+ int pages, dma_addr_t *dma_addr, uint64_t flags,
+ void *dst)
{
- unsigned t;
- unsigned p;
uint64_t page_base;
- int i, j;
+ unsigned i, j, t;
+ int idx;
- if (!adev->gart.ready) {
- WARN(1, "trying to bind memory to uninitialized GART !\n");
- return -EINVAL;
- }
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return;
t = offset / AMDGPU_GPU_PAGE_SIZE;
- p = t / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
- for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- adev->gart.pages[p] = pagelist[i];
-#endif
- if (adev->gart.ptr) {
- page_base = dma_addr[i];
- for (j = 0; j < (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE); j++, t++) {
- amdgpu_gart_set_pte_pde(adev, adev->gart.ptr, t, page_base, flags);
- page_base += AMDGPU_GPU_PAGE_SIZE;
- }
+ for (i = 0; i < pages; i++) {
+ page_base = dma_addr[i];
+ for (j = 0; j < AMDGPU_GPU_PAGES_IN_CPU_PAGE; j++, t++) {
+ amdgpu_gmc_set_pte_pde(adev, dst, t, page_base, flags);
+ page_base += AMDGPU_GPU_PAGE_SIZE;
}
}
+ drm_dev_exit(idx);
+}
+
+/**
+ * amdgpu_gart_bind - bind pages into the gart page table
+ *
+ * @adev: amdgpu_device pointer
+ * @offset: offset into the GPU's gart aperture
+ * @pages: number of pages to bind
+ * @dma_addr: DMA addresses of pages
+ * @flags: page table entry flags
+ *
+ * Binds the requested pages to the gart page table
+ * (all asics).
+ * Does nothing if the GART table has not been allocated yet.
+ */
+void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+ int pages, dma_addr_t *dma_addr,
+ uint64_t flags)
+{
+ if (!adev->gart.ptr)
+ return;
+
+ amdgpu_gart_map(adev, offset, pages, dma_addr, flags, adev->gart.ptr);
+}
+
+/**
+ * amdgpu_gart_invalidate_tlb - invalidate gart TLB
+ *
+ * @adev: amdgpu device driver pointer
+ *
+ * Invalidate the GART TLB, which can be used as a way to flush GART changes
+ *
+ */
+void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (!adev->gart.ptr)
+ return;
+
mb();
- amdgpu_gart_flush_gpu_tlb(adev, 0);
- return 0;
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_device_flush_hdp(adev, NULL);
+ up_read(&adev->reset_domain->sem);
+ }
+ for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS)
+ amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
}
/**
@@ -319,7 +428,7 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
{
int r;
- if (adev->dummy_page.page)
+ if (adev->dummy_page_addr)
return 0;
/* We need PAGE_SIZE >= AMDGPU_GPU_PAGE_SIZE */
@@ -327,44 +436,14 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
DRM_ERROR("Page size is smaller than GPU page size!\n");
return -EINVAL;
}
- r = amdgpu_dummy_page_init(adev);
+ r = amdgpu_gart_dummy_page_init(adev);
if (r)
return r;
/* Compute table size */
- adev->gart.num_cpu_pages = adev->mc.gtt_size / PAGE_SIZE;
- adev->gart.num_gpu_pages = adev->mc.gtt_size / AMDGPU_GPU_PAGE_SIZE;
+ adev->gart.num_cpu_pages = adev->gmc.gart_size / PAGE_SIZE;
+ adev->gart.num_gpu_pages = adev->gmc.gart_size / AMDGPU_GPU_PAGE_SIZE;
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- /* Allocate pages table */
- adev->gart.pages = vzalloc(sizeof(void *) * adev->gart.num_cpu_pages);
- if (adev->gart.pages == NULL) {
- amdgpu_gart_fini(adev);
- return -ENOMEM;
- }
-#endif
-
return 0;
}
-
-/**
- * amdgpu_gart_fini - tear down the driver info for managing the gart
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down the gart driver info and free the dummy page (all asics).
- */
-void amdgpu_gart_fini(struct amdgpu_device *adev)
-{
- if (adev->gart.ready) {
- /* unbind pages */
- amdgpu_gart_unbind(adev, 0, adev->gart.num_cpu_pages);
- }
- adev->gart.ready = false;
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- vfree(adev->gart.pages);
- adev->gart.pages = NULL;
-#endif
- amdgpu_dummy_page_fini(adev);
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
new file mode 100644
index 000000000000..7cc980bf4725
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_GART_H__
+#define __AMDGPU_GART_H__
+
+#include <linux/types.h>
+
+/*
+ * GART structures, functions & helpers
+ */
+struct amdgpu_device;
+struct amdgpu_bo;
+
+#define AMDGPU_GPU_PAGE_SIZE 4096
+#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
+#define AMDGPU_GPU_PAGE_SHIFT 12
+#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
+
+#define AMDGPU_GPU_PAGES_IN_CPU_PAGE (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE)
+
+struct amdgpu_gart {
+ struct amdgpu_bo *bo;
+ /* CPU kmapped address of gart table */
+ void *ptr;
+ unsigned num_gpu_pages;
+ unsigned num_cpu_pages;
+ unsigned table_size;
+
+ /* Asic default pte flags */
+ uint64_t gart_pte_flags;
+};
+
+int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
+void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
+int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
+void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
+int amdgpu_gart_init(struct amdgpu_device *adev);
+void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev);
+void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
+ int pages);
+void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
+ int pages, dma_addr_t *dma_addr, uint64_t flags,
+ void *dst);
+void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
+ int pages, dma_addr_t *dma_addr, uint64_t flags);
+void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev);
+#endif
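/*
 * Illustrative sketch (not part of this patch) of how the macros in the new
 * header split a CPU page into 4 KiB GPU pages, which is the arithmetic
 * amdgpu_gart_map() relies on.  The 64 KiB CPU page size is an assumption
 * made purely for the example; on 4 KiB PAGE_SIZE systems the ratio is 1.
 */
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_CPU_PAGE_SIZE	65536UL			/* assumed PAGE_SIZE */
#define GPU_PAGE_SIZE		4096UL			/* AMDGPU_GPU_PAGE_SIZE */
#define GPU_PAGE_MASK		(GPU_PAGE_SIZE - 1)
#define GPU_PAGE_ALIGN(a)	(((a) + GPU_PAGE_MASK) & ~GPU_PAGE_MASK)
#define GPU_PAGES_IN_CPU_PAGE	(EXAMPLE_CPU_PAGE_SIZE / GPU_PAGE_SIZE)

int main(void)
{
	/* GART offset of a binding, three CPU pages into the aperture */
	uint64_t offset = 3 * EXAMPLE_CPU_PAGE_SIZE;
	/* first PTE index, computed as in amdgpu_gart_map() */
	uint64_t t = offset / GPU_PAGE_SIZE;

	printf("GPU pages per CPU page: %lu\n", GPU_PAGES_IN_CPU_PAGE);
	printf("first PTE index for offset 0x%llx: %llu\n",
	       (unsigned long long)offset, (unsigned long long)t);
	printf("5000 bytes aligned up to a GPU page: %lu\n", GPU_PAGE_ALIGN(5000UL));
	return 0;
}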
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
index e73728d90388..f6ac1e9548f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gds.h
@@ -24,34 +24,14 @@
#ifndef __AMDGPU_GDS_H__
#define __AMDGPU_GDS_H__
-/* Because TTM request that alloacted buffer should be PAGE_SIZE aligned,
- * we should report GDS/GWS/OA size as PAGE_SIZE aligned
- * */
-#define AMDGPU_GDS_SHIFT 2
-#define AMDGPU_GWS_SHIFT PAGE_SHIFT
-#define AMDGPU_OA_SHIFT PAGE_SHIFT
-
struct amdgpu_ring;
struct amdgpu_bo;
-struct amdgpu_gds_asic_info {
- uint32_t total_size;
- uint32_t gfx_partition_size;
- uint32_t cs_partition_size;
-};
-
struct amdgpu_gds {
- struct amdgpu_gds_asic_info mem;
- struct amdgpu_gds_asic_info gws;
- struct amdgpu_gds_asic_info oa;
- /* At present, GDS, GWS and OA resources for gfx (graphics)
- * is always pre-allocated and available for graphics operation.
- * Such resource is shared between all gfx clients.
- * TODO: move this operation to user space
- * */
- struct amdgpu_bo* gds_gfx_bo;
- struct amdgpu_bo* gws_gfx_bo;
- struct amdgpu_bo* oa_gfx_bo;
+ uint32_t gds_size;
+ uint32_t gws_size;
+ uint32_t oa_size;
+ uint32_t gds_compute_max_wave_id;
};
struct amdgpu_gds_reg_offset {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 94cb91cf93eb..b7ebae289bea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -26,71 +26,219 @@
* Jerome Glisse
*/
#include <linux/ktime.h>
+#include <linux/module.h>
#include <linux/pagemap.h>
-#include <drm/drmP.h>
+#include <linux/pci.h>
+#include <linux/dma-buf.h>
+
#include <drm/amdgpu_drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_exec.h>
+#include <drm/drm_gem_ttm_helper.h>
+#include <drm/ttm/ttm_tt.h>
+#include <drm/drm_syncobj.h>
+
#include "amdgpu.h"
+#include "amdgpu_display.h"
+#include "amdgpu_dma_buf.h"
+#include "amdgpu_hmm.h"
+#include "amdgpu_xgmi.h"
+#include "amdgpu_vm.h"
+
+static int
+amdgpu_gem_add_input_fence(struct drm_file *filp,
+ uint64_t syncobj_handles_array,
+ uint32_t num_syncobj_handles)
+{
+ struct dma_fence *fence;
+ uint32_t *syncobj_handles;
+ int ret, i;
+
+ if (!num_syncobj_handles)
+ return 0;
+
+ syncobj_handles = memdup_user(u64_to_user_ptr(syncobj_handles_array),
+ size_mul(sizeof(uint32_t), num_syncobj_handles));
+ if (IS_ERR(syncobj_handles))
+ return PTR_ERR(syncobj_handles);
+
+ for (i = 0; i < num_syncobj_handles; i++) {
+
+ if (!syncobj_handles[i]) {
+ ret = -EINVAL;
+ goto free_memdup;
+ }
+
+ ret = drm_syncobj_find_fence(filp, syncobj_handles[i], 0, 0, &fence);
+ if (ret)
+ goto free_memdup;
-void amdgpu_gem_object_free(struct drm_gem_object *gobj)
+ dma_fence_wait(fence, false);
+
+ /* TODO: optimize async handling */
+ dma_fence_put(fence);
+ }
+
+free_memdup:
+ kfree(syncobj_handles);
+ return ret;
+}
+
+static int
+amdgpu_gem_update_timeline_node(struct drm_file *filp,
+ uint32_t syncobj_handle,
+ uint64_t point,
+ struct drm_syncobj **syncobj,
+ struct dma_fence_chain **chain)
{
- struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
+ if (!syncobj_handle)
+ return 0;
+
+ /* Find the sync object */
+ *syncobj = drm_syncobj_find(filp, syncobj_handle);
+ if (!*syncobj)
+ return -ENOENT;
+
+ if (!point)
+ return 0;
- if (robj) {
- if (robj->gem_base.import_attach)
- drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
- amdgpu_mn_unregister(robj);
- amdgpu_bo_unref(&robj);
+ /* Allocate the chain node */
+ *chain = dma_fence_chain_alloc();
+ if (!*chain) {
+ drm_syncobj_put(*syncobj);
+ return -ENOMEM;
}
+
+ return 0;
}
-int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
- int alignment, u32 initial_domain,
- u64 flags, bool kernel,
- struct drm_gem_object **obj)
+static void
+amdgpu_gem_update_bo_mapping(struct drm_file *filp,
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation,
+ uint64_t point,
+ struct dma_fence *fence,
+ struct drm_syncobj *syncobj,
+ struct dma_fence_chain *chain)
{
- struct amdgpu_bo *robj;
- unsigned long max_size;
- int r;
+ struct amdgpu_bo *bo = bo_va ? bo_va->base.bo : NULL;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct dma_fence *last_update;
- *obj = NULL;
- /* At least align on page size */
- if (alignment < PAGE_SIZE) {
- alignment = PAGE_SIZE;
+ if (!syncobj)
+ return;
+
+ /* Find the last update fence */
+ switch (operation) {
+ case AMDGPU_VA_OP_MAP:
+ case AMDGPU_VA_OP_REPLACE:
+ if (bo && (bo->tbo.base.resv == vm->root.bo->tbo.base.resv))
+ last_update = vm->last_update;
+ else
+ last_update = bo_va->last_pt_update;
+ break;
+ case AMDGPU_VA_OP_UNMAP:
+ case AMDGPU_VA_OP_CLEAR:
+ last_update = fence;
+ break;
+ default:
+ return;
}
- if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
- /* Maximum bo size is the unpinned gtt size since we use the gtt to
- * handle vram to system pool migrations.
- */
- max_size = adev->mc.gtt_size - adev->gart_pin_size;
- if (size > max_size) {
- DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
- size >> 20, max_size >> 20);
- return -ENOMEM;
+ /* Add fence to timeline */
+ if (!point)
+ drm_syncobj_replace_fence(syncobj, last_update);
+ else
+ drm_syncobj_add_point(syncobj, chain, last_update, point);
+}
+
+static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+ struct drm_device *ddev = bo->base.dev;
+ vm_fault_t ret;
+ int idx;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ if (drm_dev_enter(ddev, &idx)) {
+ ret = amdgpu_bo_fault_reserve_notify(bo);
+ if (ret) {
+ drm_dev_exit(idx);
+ goto unlock;
}
+
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT);
+
+ drm_dev_exit(idx);
+ } else {
+ ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
}
-retry:
- r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
- flags, NULL, NULL, &robj);
- if (r) {
- if (r != -ERESTARTSYS) {
- if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
- initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
- goto retry;
- }
- DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
- size, initial_domain, alignment, r);
- }
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+unlock:
+ dma_resv_unlock(bo->base.resv);
+ return ret;
+}
+
+static const struct vm_operations_struct amdgpu_gem_vm_ops = {
+ .fault = amdgpu_gem_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access
+};
+
+static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
+{
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(gobj);
+
+ amdgpu_hmm_unregister(aobj);
+ ttm_bo_put(&aobj->tbo);
+}
+
+int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
+ int alignment, u32 initial_domain,
+ u64 flags, enum ttm_bo_type type,
+ struct dma_resv *resv,
+ struct drm_gem_object **obj, int8_t xcp_id_plus1)
+{
+ struct amdgpu_bo *bo;
+ struct amdgpu_bo_user *ubo;
+ struct amdgpu_bo_param bp;
+ int r;
+
+ memset(&bp, 0, sizeof(bp));
+ *obj = NULL;
+ flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
+
+ bp.size = size;
+ bp.byte_align = alignment;
+ bp.type = type;
+ bp.resv = resv;
+ bp.preferred_domain = initial_domain;
+ bp.flags = flags;
+ bp.domain = initial_domain;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+ bp.xcp_id_plus1 = xcp_id_plus1;
+
+ r = amdgpu_bo_create_user(adev, &bp, &ubo);
+ if (r)
return r;
- }
- *obj = &robj->gem_base;
+
+ bo = &ubo->bo;
+ *obj = &bo->tbo.base;
return 0;
}
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
- struct drm_device *ddev = adev->ddev;
+ struct drm_device *ddev = adev_to_drm(adev);
struct drm_file *file;
mutex_lock(&ddev->filelist_mutex);
@@ -103,7 +251,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
spin_lock(&file->table_lock);
idr_for_each_entry(&file->object_idr, gobj, handle) {
WARN_ONCE(1, "And also active allocations!\n");
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put(gobj);
}
idr_destroy(&file->object_idr);
spin_unlock(&file->table_lock);
@@ -116,203 +264,280 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
* Call from drm_gem_handle_create which appear in both new and open ioctl
* case.
*/
-int amdgpu_gem_object_open(struct drm_gem_object *obj,
- struct drm_file *file_priv)
+static int amdgpu_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
{
struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
+ struct mm_struct *mm;
int r;
+
+ mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
+ if (mm && mm != current->mm)
+ return -EPERM;
+
+ if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
+ !amdgpu_vm_is_bo_always_valid(vm, abo))
+ return -EPERM;
+
r = amdgpu_bo_reserve(abo, false);
if (r)
return r;
+ amdgpu_vm_bo_update_shared(abo);
bo_va = amdgpu_vm_bo_find(vm, abo);
- if (!bo_va) {
+ if (!bo_va)
bo_va = amdgpu_vm_bo_add(adev, vm, abo);
- } else {
+ else
++bo_va->ref_count;
- }
- amdgpu_bo_unreserve(abo);
- return 0;
-}
-
-static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
-{
- /* if anything is swapped out don't swap it in here,
- just abort and wait for the next CS */
- if (!amdgpu_bo_gpu_accessible(bo))
- return -ERESTARTSYS;
- if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
- return -ERESTARTSYS;
-
- return 0;
-}
+ /* attach gfx eviction fence */
+ r = amdgpu_eviction_fence_attach(&fpriv->evf_mgr, abo);
+ if (r) {
+ DRM_DEBUG_DRIVER("Failed to attach eviction fence to BO\n");
+ amdgpu_bo_unreserve(abo);
+ return r;
+ }
-static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct list_head *list)
-{
- struct ttm_validate_buffer *entry;
+ amdgpu_bo_unreserve(abo);
- list_for_each_entry(entry, list, head) {
- struct amdgpu_bo *bo =
- container_of(entry->bo, struct amdgpu_bo, tbo);
- if (amdgpu_gem_vm_check(NULL, bo))
- return false;
+ /* Validate and add eviction fence to DMABuf imports with dynamic
+ * attachment in compute VMs. Re-validation will be done by
+ * amdgpu_vm_validate. Fences are on the reservation shared with the
+ * export, which is currently required to be validated and fenced
+ * already by amdgpu_amdkfd_gpuvm_restore_process_bos.
+ *
+ * Nested locking below for the case that a GEM object is opened in
+ * kfd_mem_export_dmabuf. Since the lock below is only taken for imports,
+ * but not for export, this is a different lock class that cannot lead to
+ * circular lock dependencies.
+ */
+ if (!vm->is_compute_context || !vm->process_info)
+ return 0;
+ if (!drm_gem_is_imported(obj) ||
+ !dma_buf_is_dynamic(obj->import_attach->dmabuf))
+ return 0;
+ mutex_lock_nested(&vm->process_info->lock, 1);
+ if (!WARN_ON(!vm->process_info->eviction_fence)) {
+ r = amdgpu_amdkfd_bo_validate_and_fence(abo, AMDGPU_GEM_DOMAIN_GTT,
+ &vm->process_info->eviction_fence->base);
+ if (r) {
+ struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
+
+ dev_warn(adev->dev, "validate_and_fence failed: %d\n", r);
+ if (ti) {
+ dev_warn(adev->dev, "pid %d\n", ti->task.pid);
+ amdgpu_vm_put_task_info(ti);
+ }
+ }
}
+ mutex_unlock(&vm->process_info->lock);
- return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
+ return r;
}
-void amdgpu_gem_object_close(struct drm_gem_object *obj,
- struct drm_file *file_priv)
+static void amdgpu_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_bo_list_entry vm_pd;
- struct list_head list;
- struct ttm_validate_buffer tv;
- struct ww_acquire_ctx ticket;
+ struct dma_fence *fence = NULL;
struct amdgpu_bo_va *bo_va;
- int r;
-
- INIT_LIST_HEAD(&list);
-
- tv.bo = &bo->tbo;
- tv.shared = true;
- list_add(&tv.head, &list);
+ struct drm_exec exec;
+ long r;
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto out_unlock;
+
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto out_unlock;
+ }
- amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
+ if (!amdgpu_vm_is_bo_always_valid(vm, bo))
+ amdgpu_eviction_fence_detach(&fpriv->evf_mgr, bo);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
- if (r) {
- dev_err(adev->dev, "leaking bo va because "
- "we fail to reserve bo (%d)\n", r);
- return;
- }
bo_va = amdgpu_vm_bo_find(vm, bo);
- if (bo_va && --bo_va->ref_count == 0) {
- amdgpu_vm_bo_rmv(adev, bo_va);
+ if (!bo_va || --bo_va->ref_count)
+ goto out_unlock;
- if (amdgpu_gem_vm_ready(adev, vm, &list)) {
- struct dma_fence *fence = NULL;
+ amdgpu_vm_bo_del(adev, bo_va);
+ amdgpu_vm_bo_update_shared(bo);
+ if (!amdgpu_vm_ready(vm))
+ goto out_unlock;
- r = amdgpu_vm_clear_freed(adev, vm, &fence);
- if (unlikely(r)) {
- dev_err(adev->dev, "failed to clear page "
- "tables on GEM object close (%d)\n", r);
- }
+ r = amdgpu_vm_clear_freed(adev, vm, &fence);
+ if (unlikely(r < 0))
+ dev_err(adev->dev, "failed to clear page "
+ "tables on GEM object close (%ld)\n", r);
+ if (r || !fence)
+ goto out_unlock;
- if (fence) {
- amdgpu_bo_fence(bo, fence, true);
- dma_fence_put(fence);
- }
- }
- }
- ttm_eu_backoff_reservation(&ticket, &list);
+ amdgpu_bo_fence(bo, fence, true);
+ dma_fence_put(fence);
+
+out_unlock:
+ if (r)
+ dev_err(adev->dev, "leaking bo va (%ld)\n", r);
+ drm_exec_fini(&exec);
}
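/*
 * Minimal sketch (not part of this patch) of the drm_exec pattern that
 * replaces the old ttm_eu reservation code in the open/close paths above:
 * all objects are locked inside drm_exec_until_all_locked(), and
 * drm_exec_retry_on_contention() restarts the loop after dropping the locks
 * whenever a reservation conflict is hit, so no hand-rolled backoff is
 * needed.  The helper name is made up for illustration.
 */
static int example_lock_bo_and_pd(struct amdgpu_vm *vm,
				  struct drm_gem_object *gobj)
{
	struct drm_exec exec;
	int r = 0;

	drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
	drm_exec_until_all_locked(&exec) {
		/* lock the BO itself */
		r = drm_exec_lock_obj(&exec, gobj);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out;

		/* plus the VM page directory */
		r = amdgpu_vm_lock_pd(vm, &exec, 0);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto out;
	}

	/* ... work on the locked objects here ... */
out:
	drm_exec_fini(&exec);
	return r;
}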
-static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
+static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
- if (r == -EDEADLK) {
- r = amdgpu_gpu_reset(adev);
- if (!r)
- r = -EAGAIN;
- }
- return r;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
+ return -EPERM;
+ if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
+ return -EPERM;
+
+ /* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings
+ * for debugger access to invisible VRAM. Should have used MAP_SHARED
+ * instead. Clearing VM_MAYWRITE prevents the mapping from ever
+ * becoming writable and makes is_cow_mapping(vm_flags) false.
+ */
+ if (is_cow_mapping(vma->vm_flags) &&
+ !(vma->vm_flags & VM_ACCESS_FLAGS))
+ vm_flags_clear(vma, VM_MAYWRITE);
+
+ return drm_gem_ttm_mmap(obj, vma);
}
+const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
+ .free = amdgpu_gem_object_free,
+ .open = amdgpu_gem_object_open,
+ .close = amdgpu_gem_object_close,
+ .export = amdgpu_gem_prime_export,
+ .vmap = drm_gem_ttm_vmap,
+ .vunmap = drm_gem_ttm_vunmap,
+ .mmap = amdgpu_gem_object_mmap,
+ .vm_ops = &amdgpu_gem_vm_ops,
+};
+
/*
* GEM ioctls.
*/
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
union drm_amdgpu_gem_create *args = data;
+ uint64_t flags = args->in.domain_flags;
uint64_t size = args->in.bo_size;
+ struct dma_resv *resv = NULL;
struct drm_gem_object *gobj;
- uint32_t handle;
- bool kernel = false;
+ uint32_t handle, initial_domain;
int r;
/* reject invalid gem flags */
- if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_VRAM_CLEARED|
- AMDGPU_GEM_CREATE_SHADOW |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
- r = -EINVAL;
- goto error_unlock;
- }
+ if (flags & ~AMDGPU_GEM_CREATE_SETTABLE_MASK)
+ return -EINVAL;
+
/* reject invalid gem domains */
- if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
- AMDGPU_GEM_DOMAIN_GTT |
- AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GDS |
- AMDGPU_GEM_DOMAIN_GWS |
- AMDGPU_GEM_DOMAIN_OA)) {
- r = -EINVAL;
- goto error_unlock;
+ if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
+ return -EINVAL;
+
+ if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
+ DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
+ return -EINVAL;
}
+ /* always clear VRAM */
+ flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+
+ if (args->in.domains & AMDGPU_GEM_DOMAIN_MMIO_REMAP)
+ return -EINVAL;
+
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
- kernel = true;
- if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
- size = size << AMDGPU_GDS_SHIFT;
- else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
- size = size << AMDGPU_GWS_SHIFT;
- else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
- size = size << AMDGPU_OA_SHIFT;
- else {
- r = -EINVAL;
- goto error_unlock;
+ if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+ /* if gds bo is created from user space, it must be
+ * passed to bo list
+ */
+ DRM_ERROR("GDS bo cannot be per-vm-bo\n");
+ return -EINVAL;
}
+ flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
}
- size = roundup(size, PAGE_SIZE);
+ if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+ r = amdgpu_bo_reserve(vm->root.bo, false);
+ if (r)
+ return r;
+
+ resv = vm->root.bo->tbo.base.resv;
+ }
+
+ initial_domain = (u32)(0xffffffff & args->in.domains);
+retry:
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
- (u32)(0xffffffff & args->in.domains),
- args->in.domain_flags,
- kernel, &gobj);
+ initial_domain,
+ flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1);
+ if (r && r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
+
+ if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+ initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+ goto retry;
+ }
+ DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
+ size, initial_domain, args->in.alignment, r);
+ }
+
+ if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+ if (!r) {
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+
+ abo->parent = amdgpu_bo_ref(vm->root.bo);
+ }
+ amdgpu_bo_unreserve(vm->root.bo);
+ }
if (r)
- goto error_unlock;
+ return r;
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put(gobj);
if (r)
- goto error_unlock;
+ return r;
memset(args, 0, sizeof(*args));
args->out.handle = handle;
return 0;
-
-error_unlock:
- r = amdgpu_gem_handle_lockup(adev, r);
- return r;
}
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct ttm_operation_ctx ctx = { true, false };
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_gem_userptr *args = data;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct drm_gem_object *gobj;
+ struct hmm_range *range;
struct amdgpu_bo *bo;
uint32_t handle;
int r;
+ args->addr = untagged_addr(args->addr);
+
if (offset_in_page(args->addr | args->size))
return -EINVAL;
@@ -330,66 +555,52 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
}
/* create a gem object to contain this object in */
- r = amdgpu_gem_object_create(adev, args->size, 0,
- AMDGPU_GEM_DOMAIN_CPU, 0,
- 0, &gobj);
+ r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
+ 0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
if (r)
- goto handle_lockup;
+ return r;
bo = gem_to_amdgpu_bo(gobj);
- bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
+ bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
- r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
+ r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
if (r)
goto release_object;
- if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
- r = amdgpu_mn_register(bo, args->addr);
- if (r)
- goto release_object;
- }
+ r = amdgpu_hmm_register(bo, args->addr);
+ if (r)
+ goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- down_read(&current->mm->mmap_sem);
-
- r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
- bo->tbo.ttm->pages);
+ r = amdgpu_ttm_tt_get_user_pages(bo, &range);
if (r)
- goto unlock_mmap_sem;
+ goto release_object;
r = amdgpu_bo_reserve(bo, true);
if (r)
- goto free_pages;
+ goto user_pages_done;
+
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, range);
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (r)
- goto free_pages;
-
- up_read(&current->mm->mmap_sem);
+ goto user_pages_done;
}
r = drm_gem_handle_create(filp, gobj, &handle);
- /* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
if (r)
- goto handle_lockup;
+ goto user_pages_done;
args->handle = handle;
- return 0;
-free_pages:
- release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);
-
-unlock_mmap_sem:
- up_read(&current->mm->mmap_sem);
+user_pages_done:
+ if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
release_object:
- drm_gem_object_unreference_unlocked(gobj);
-
-handle_lockup:
- r = amdgpu_gem_handle_lockup(adev, r);
+ drm_gem_object_put(gobj);
return r;
}
@@ -402,17 +613,17 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
struct amdgpu_bo *robj;
gobj = drm_gem_object_lookup(filp, handle);
- if (gobj == NULL) {
+ if (!gobj)
return -ENOENT;
- }
+
robj = gem_to_amdgpu_bo(gobj);
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
(robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put(gobj);
return -EPERM;
}
*offset_p = amdgpu_bo_mmap_offset(robj);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put(gobj);
return 0;
}
@@ -421,6 +632,7 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
{
union drm_amdgpu_gem_mmap *args = data;
uint32_t handle = args->in.handle;
+
memset(args, 0, sizeof(*args));
return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}
@@ -447,7 +659,7 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
/* clamp timeout to avoid unsigned-> signed overflow */
- if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT )
+ if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
return MAX_SCHEDULE_TIMEOUT - 1;
return timeout_jiffies;
@@ -456,7 +668,6 @@ unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
union drm_amdgpu_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
struct amdgpu_bo *robj;
@@ -466,12 +677,12 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
long ret;
gobj = drm_gem_object_lookup(filp, handle);
- if (gobj == NULL) {
+ if (!gobj)
return -ENOENT;
- }
+
robj = gem_to_amdgpu_bo(gobj);
- ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
- timeout);
+ ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
+ true, timeout);
/* ret == 0 means not signaled,
* ret > 0 means signaled
@@ -483,8 +694,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
} else
r = ret;
- drm_gem_object_unreference_unlocked(gobj);
- r = amdgpu_gem_handle_lockup(adev, r);
+ drm_gem_object_put(gobj);
return r;
}
@@ -496,7 +706,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
struct amdgpu_bo *robj;
int r = -1;
- DRM_DEBUG("%d \n", args->handle);
+ DRM_DEBUG("%d\n", args->handle);
gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL)
return -ENOENT;
@@ -527,7 +737,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
unreserve:
amdgpu_bo_unreserve(robj);
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put(gobj);
return r;
}
@@ -537,38 +747,44 @@ out:
* @adev: amdgpu_device pointer
* @vm: vm to update
* @bo_va: bo_va to update
- * @list: validation list
* @operation: map, unmap or clear
*
* Update the bo_va directly after setting its address. Errors are not
* vital here, so they are not reported back to userspace.
+ *
+ * Returns the resulting fence if freed BO(s) were cleared from the PT,
+ * otherwise a stub fence in case of error.
*/
-static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_bo_va *bo_va,
- struct list_head *list,
- uint32_t operation)
+static struct dma_fence *
+amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo_va *bo_va,
+ uint32_t operation)
{
- int r = -ERESTARTSYS;
-
- if (!amdgpu_gem_vm_ready(adev, vm, list))
- goto error;
+ struct dma_fence *fence = dma_fence_get_stub();
+ int r;
- r = amdgpu_vm_update_directories(adev, vm);
- if (r)
- goto error;
+ if (!amdgpu_vm_ready(vm))
+ return fence;
- r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ r = amdgpu_vm_clear_freed(adev, vm, &fence);
if (r)
goto error;
if (operation == AMDGPU_VA_OP_MAP ||
- operation == AMDGPU_VA_OP_REPLACE)
+ operation == AMDGPU_VA_OP_REPLACE) {
r = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (r)
+ goto error;
+ }
+
+ r = amdgpu_vm_update_pdes(adev, vm, false);
error:
if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
+
+ return fence;
}
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
@@ -576,36 +792,53 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
{
const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
- AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
+ AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK |
+ AMDGPU_VM_PAGE_NOALLOC;
const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
AMDGPU_VM_PAGE_PRT;
struct drm_amdgpu_gem_va *args = data;
struct drm_gem_object *gobj;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct amdgpu_bo *abo;
struct amdgpu_bo_va *bo_va;
- struct amdgpu_bo_list_entry vm_pd;
- struct ttm_validate_buffer tv;
- struct ww_acquire_ctx ticket;
- struct list_head list;
- uint64_t va_flags;
+ struct drm_syncobj *timeline_syncobj = NULL;
+ struct dma_fence_chain *timeline_chain = NULL;
+ struct dma_fence *fence;
+ struct drm_exec exec;
+ uint64_t vm_size;
int r = 0;
- if (!adev->vm_manager.enabled)
- return -ENOTTY;
+ if (args->va_address < AMDGPU_VA_RESERVED_BOTTOM) {
+ dev_dbg(dev->dev,
+ "va_address 0x%llx is in reserved area 0x%llx\n",
+ args->va_address, AMDGPU_VA_RESERVED_BOTTOM);
+ return -EINVAL;
+ }
+
+ if (args->va_address >= AMDGPU_GMC_HOLE_START &&
+ args->va_address < AMDGPU_GMC_HOLE_END) {
+ dev_dbg(dev->dev,
+ "va_address 0x%llx is in VA hole 0x%llx-0x%llx\n",
+ args->va_address, AMDGPU_GMC_HOLE_START,
+ AMDGPU_GMC_HOLE_END);
+ return -EINVAL;
+ }
+
+ args->va_address &= AMDGPU_GMC_HOLE_MASK;
- if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
- dev_err(&dev->pdev->dev,
- "va_address 0x%lX is in reserved area 0x%X\n",
- (unsigned long)args->va_address,
- AMDGPU_VA_RESERVED_SIZE);
+ vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
+ vm_size -= AMDGPU_VA_RESERVED_TOP;
+ if (args->va_address + args->map_size > vm_size) {
+ dev_dbg(dev->dev,
+ "va_address 0x%llx is in top reserved area 0x%llx\n",
+ args->va_address + args->map_size, vm_size);
return -EINVAL;
}
if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
- dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
+ dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
args->flags);
return -EINVAL;
}
@@ -617,37 +850,49 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
case AMDGPU_VA_OP_REPLACE:
break;
default:
- dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+ dev_dbg(dev->dev, "unsupported operation %d\n",
args->operation);
return -EINVAL;
}
- INIT_LIST_HEAD(&list);
if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
!(args->flags & AMDGPU_VM_PAGE_PRT)) {
gobj = drm_gem_object_lookup(filp, args->handle);
if (gobj == NULL)
return -ENOENT;
abo = gem_to_amdgpu_bo(gobj);
- tv.bo = &abo->tbo;
- tv.shared = false;
- list_add(&tv.head, &list);
} else {
gobj = NULL;
abo = NULL;
}
- amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
-
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ r = amdgpu_gem_add_input_fence(filp,
+ args->input_fence_syncobj_handles,
+ args->num_syncobj_handles);
if (r)
- goto error_unref;
+ goto error_put_gobj;
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ if (gobj) {
+ r = drm_exec_lock_obj(&exec, gobj);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error;
+ }
+
+ r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error;
+ }
if (abo) {
bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
if (!bo_va) {
r = -ENOENT;
- goto error_backoff;
+ goto error;
}
} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
bo_va = fpriv->prt_va;
@@ -655,17 +900,19 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
bo_va = NULL;
}
+ r = amdgpu_gem_update_timeline_node(filp,
+ args->vm_timeline_syncobj_out,
+ args->vm_timeline_point,
+ &timeline_syncobj,
+ &timeline_chain);
+ if (r)
+ goto error;
+
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
- args->map_size);
- if (r)
- goto error_backoff;
-
- va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
case AMDGPU_VA_OP_UNMAP:
r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
@@ -677,28 +924,32 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
- args->map_size);
- if (r)
- goto error_backoff;
-
- va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
- va_flags);
+ args->flags);
break;
default:
break;
}
- if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
- amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
- args->operation);
+ if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !adev->debug_vm) {
+ fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
+ args->operation);
+
+ if (timeline_syncobj)
+ amdgpu_gem_update_bo_mapping(filp, bo_va,
+ args->operation,
+ args->vm_timeline_point,
+ fence, timeline_syncobj,
+ timeline_chain);
+ else
+ dma_fence_put(fence);
-error_backoff:
- ttm_eu_backoff_reservation(&ticket, &list);
+ }
-error_unref:
- drm_gem_object_unreference_unlocked(gobj);
+error:
+ drm_exec_fini(&exec);
+error_put_gobj:
+ drm_gem_object_put(gobj);
return r;
}
@@ -707,135 +958,294 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
{
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
+ struct amdgpu_vm_bo_base *base;
struct amdgpu_bo *robj;
+ struct drm_exec exec;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
int r;
+ if (args->padding)
+ return -EINVAL;
+
gobj = drm_gem_object_lookup(filp, args->handle);
- if (gobj == NULL) {
+ if (!gobj)
return -ENOENT;
- }
+
robj = gem_to_amdgpu_bo(gobj);
- r = amdgpu_bo_reserve(robj, false);
- if (unlikely(r))
- goto out;
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT |
+ DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_lock_obj(&exec, gobj);
+ drm_exec_retry_on_contention(&exec);
+ if (r)
+ goto out_exec;
+
+ if (args->op == AMDGPU_GEM_OP_GET_MAPPING_INFO) {
+ r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
+ drm_exec_retry_on_contention(&exec);
+ if (r)
+ goto out_exec;
+ }
+ }
switch (args->op) {
case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
struct drm_amdgpu_gem_create_in info;
- void __user *out = (void __user *)(uintptr_t)args->value;
+ void __user *out = u64_to_user_ptr(args->value);
- info.bo_size = robj->gem_base.size;
- info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
- info.domains = robj->prefered_domains;
+ info.bo_size = robj->tbo.base.size;
+ info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
+ info.domains = robj->preferred_domains;
info.domain_flags = robj->flags;
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
if (copy_to_user(out, &info, sizeof(info)))
r = -EFAULT;
break;
}
case AMDGPU_GEM_OP_SET_PLACEMENT:
- if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
+ if (drm_gem_is_imported(&robj->tbo.base) &&
+ args->value & AMDGPU_GEM_DOMAIN_VRAM) {
r = -EINVAL;
- amdgpu_bo_unreserve(robj);
- break;
+ goto out_exec;
}
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
r = -EPERM;
- amdgpu_bo_unreserve(robj);
- break;
+ goto out_exec;
}
- robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
+ for (base = robj->vm_bo; base; base = base->next)
+ if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
+ amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
+ r = -EINVAL;
+ goto out_exec;
+ }
+
+
+ robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_CPU);
- robj->allowed_domains = robj->prefered_domains;
+ robj->allowed_domains = robj->preferred_domains;
if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
- amdgpu_bo_unreserve(robj);
+ if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+ amdgpu_vm_bo_invalidate(robj, true);
+ drm_exec_fini(&exec);
+ break;
+ case AMDGPU_GEM_OP_GET_MAPPING_INFO: {
+ struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(&fpriv->vm, robj);
+ struct drm_amdgpu_gem_vm_entry *vm_entries;
+ struct amdgpu_bo_va_mapping *mapping;
+ int num_mappings = 0;
+ /*
+ * On input, num_entries is the size of the user-allocated array of
+ * drm_amdgpu_gem_vm_entry stored at args->value.
+ * On output, it is set to the number of mappings the BO has.
+ * If that number is larger than the size of the array, the ioctl must
+ * be retried with a larger array.
+ */
+ vm_entries = kvcalloc(args->num_entries, sizeof(*vm_entries), GFP_KERNEL);
+ if (!vm_entries)
+ return -ENOMEM;
+
+ amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].offset = mapping->offset;
+ vm_entries[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
+
+ amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) {
+ if (num_mappings < args->num_entries) {
+ vm_entries[num_mappings].addr = mapping->start * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].size = (mapping->last - mapping->start + 1) * AMDGPU_GPU_PAGE_SIZE;
+ vm_entries[num_mappings].offset = mapping->offset;
+ vm_entries[num_mappings].flags = mapping->flags;
+ }
+ num_mappings += 1;
+ }
+
+ drm_exec_fini(&exec);
+
+ if (num_mappings > 0 && num_mappings <= args->num_entries)
+ if (copy_to_user(u64_to_user_ptr(args->value), vm_entries, num_mappings * sizeof(*vm_entries)))
+ r = -EFAULT;
+
+ args->num_entries = num_mappings;
+
+ kvfree(vm_entries);
break;
+ }
default:
- amdgpu_bo_unreserve(robj);
+ drm_exec_fini(&exec);
r = -EINVAL;
}
-out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put(gobj);
+ return r;
+out_exec:
+ drm_exec_fini(&exec);
+ drm_gem_object_put(gobj);
return r;
}
+/**
+ * amdgpu_gem_list_handles_ioctl - get information about a process' buffer objects
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_gem_list_handles
+ * @filp: drm file pointer
+ *
+ * On input, num_entries is the size of the caller's entries array.
+ * On output, it is set to the number of BOs owned by the process.
+ * If that number is larger than the size of the array, the ioctl must
+ * be retried with a larger array.
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_amdgpu_gem_list_handles *args = data;
+ struct drm_amdgpu_gem_list_handles_entry *bo_entries;
+ struct drm_gem_object *gobj;
+ int id, ret = 0;
+ int bo_index = 0;
+ int num_bos = 0;
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id)
+ num_bos += 1;
+ spin_unlock(&filp->table_lock);
+
+ if (args->num_entries < num_bos) {
+ args->num_entries = num_bos;
+ return 0;
+ }
+
+ if (num_bos == 0) {
+ args->num_entries = 0;
+ return 0;
+ }
+
+ bo_entries = kvcalloc(num_bos, sizeof(*bo_entries), GFP_KERNEL);
+ if (!bo_entries)
+ return -ENOMEM;
+
+ spin_lock(&filp->table_lock);
+ idr_for_each_entry(&filp->object_idr, gobj, id) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct drm_amdgpu_gem_list_handles_entry *bo_entry;
+
+ if (bo_index >= num_bos) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ bo_entry = &bo_entries[bo_index];
+
+ bo_entry->size = amdgpu_bo_size(bo);
+ bo_entry->alloc_flags = bo->flags & AMDGPU_GEM_CREATE_SETTABLE_MASK;
+ bo_entry->preferred_domains = bo->preferred_domains;
+ bo_entry->gem_handle = id;
+ bo_entry->alignment = bo->tbo.page_alignment;
+
+ if (bo->tbo.base.import_attach)
+ bo_entry->flags |= AMDGPU_GEM_LIST_HANDLES_FLAG_IS_IMPORT;
+
+ bo_index += 1;
+ }
+ spin_unlock(&filp->table_lock);
+
+ args->num_entries = bo_index;
+
+ if (!ret)
+ if (copy_to_user(u64_to_user_ptr(args->entries), bo_entries, num_bos * sizeof(*bo_entries)))
+ ret = -EFAULT;
+
+ kvfree(bo_entries);
+
+ return ret;
+}
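/*
 * Userspace-side sketch (not part of this patch) of the two-pass protocol
 * described in the comments above, shared by AMDGPU_GEM_OP_GET_MAPPING_INFO
 * and the list_handles ioctl: query first to learn the count, allocate, then
 * retry with a large enough array.  The DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES
 * request name and the include path are assumptions; only the num_entries
 * and entries fields are taken from the code above.
 */
#include <stdlib.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>	/* assumed uapi header location */

static struct drm_amdgpu_gem_list_handles_entry *
example_list_handles(int drm_fd, uint32_t *count)
{
	struct drm_amdgpu_gem_list_handles args = { 0 };
	struct drm_amdgpu_gem_list_handles_entry *entries;

	/* first pass: num_entries is 0, the kernel reports how many BOs exist */
	if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES, &args))
		return NULL;

	entries = calloc(args.num_entries, sizeof(*entries));
	if (!entries)
		return NULL;

	/* second pass: hand in the array; another retry is only needed if new
	 * BOs were created in between and num_entries grew again */
	args.entries = (uintptr_t)entries;
	if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_GEM_LIST_HANDLES, &args)) {
		free(entries);
		return NULL;
	}

	*count = args.num_entries;
	return entries;
}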
+
+static int amdgpu_gem_align_pitch(struct amdgpu_device *adev,
+ int width,
+ int cpp,
+ bool tiled)
+{
+ int aligned = width;
+ int pitch_mask = 0;
+
+ switch (cpp) {
+ case 1:
+ pitch_mask = 255;
+ break;
+ case 2:
+ pitch_mask = 127;
+ break;
+ case 3:
+ case 4:
+ pitch_mask = 63;
+ break;
+ }
+
+ aligned += pitch_mask;
+ aligned &= ~pitch_mask;
+ return aligned * cpp;
+}
+
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct drm_gem_object *gobj;
uint32_t handle;
+ u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ u32 domain;
int r;
- args->pitch = amdgpu_align_pitch(adev, args->width,
- DIV_ROUND_UP(args->bpp, 8), 0);
+ /*
+ * The buffer returned from this function should be cleared, but that
+ * can only be done when the ring is enabled; otherwise buffer
+ * creation would fail.
+ */
+ if (adev->mman.buffer_funcs_enabled)
+ flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+
+ args->pitch = amdgpu_gem_align_pitch(adev, args->width,
+ DIV_ROUND_UP(args->bpp, 8), 0);
args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
-
- r = amdgpu_gem_object_create(adev, args->size, 0,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- ttm_bo_type_device,
- &gobj);
+ domain = amdgpu_bo_get_preferred_domain(adev,
+ amdgpu_display_supported_domains(adev, flags));
+ r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
+ ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1);
if (r)
return -ENOMEM;
r = drm_gem_handle_create(file_priv, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(gobj);
- if (r) {
+ drm_gem_object_put(gobj);
+ if (r)
return r;
- }
+
args->handle = handle;
return 0;
}
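/*
 * Worked example (not part of this patch) of the pitch and size math used by
 * amdgpu_gem_align_pitch() and amdgpu_mode_dumb_create() above, with made-up
 * framebuffer numbers: 4 bytes per pixel uses a 64-pixel alignment
 * (pitch_mask 63), so a 1000-pixel-wide buffer rounds up to 1024 pixels
 * before the byte pitch and the page-aligned allocation size are derived.
 */
#include <stdio.h>
#include <stdint.h>

static int example_align_pitch(int width, int cpp)
{
	int pitch_mask;

	switch (cpp) {
	case 1:
		pitch_mask = 255;
		break;
	case 2:
		pitch_mask = 127;
		break;
	case 3:
	case 4:
		pitch_mask = 63;
		break;
	default:
		pitch_mask = 0;
		break;
	}
	return ((width + pitch_mask) & ~pitch_mask) * cpp;
}

int main(void)
{
	int pitch = example_align_pitch(1000, 4);	/* 1024 pixels * 4 = 4096 bytes */
	uint64_t size = (uint64_t)pitch * 720;		/* assumed height of 720 lines */
	uint64_t page = 4096;				/* assumed PAGE_SIZE */

	size = (size + page - 1) & ~(page - 1);		/* ALIGN(size, PAGE_SIZE) */
	printf("pitch %d bytes, dumb buffer size %llu bytes\n",
	       pitch, (unsigned long long)size);
	return 0;
}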
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
+static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
- struct drm_gem_object *gobj = ptr;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
- struct seq_file *m = data;
-
- unsigned domain;
- const char *placement;
- unsigned pin_count;
-
- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
- switch (domain) {
- case AMDGPU_GEM_DOMAIN_VRAM:
- placement = "VRAM";
- break;
- case AMDGPU_GEM_DOMAIN_GTT:
- placement = " GTT";
- break;
- case AMDGPU_GEM_DOMAIN_CPU:
- default:
- placement = " CPU";
- break;
- }
- seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
- id, amdgpu_bo_size(bo), placement,
- amdgpu_bo_gpu_offset(bo));
-
- pin_count = ACCESS_ONCE(bo->pin_count);
- if (pin_count)
- seq_printf(m, " pin count %d", pin_count);
- seq_printf(m, "\n");
-
- return 0;
-}
-
-static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = m->private;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_file *file;
int r;
@@ -845,6 +1255,9 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
list_for_each_entry(file, &dev->filelist, lhead) {
struct task_struct *task;
+ struct drm_gem_object *gobj;
+ struct pid *pid;
+ int id;
/*
* Although we have a valid reference on file->pid, that does
@@ -853,13 +1266,18 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
* Therefore, we need to protect this ->comm access using RCU.
*/
rcu_read_lock();
- task = pid_task(file->pid, PIDTYPE_PID);
- seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
+ pid = rcu_dereference(file->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+ seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
task ? task->comm : "<unknown>");
rcu_read_unlock();
spin_lock(&file->table_lock);
- idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
+ idr_for_each_entry(&file->object_idr, gobj, id) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+
+ amdgpu_bo_print_info(id, bo, m);
+ }
spin_unlock(&file->table_lock);
}
@@ -867,15 +1285,17 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
- {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
-};
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);
+
#endif
-int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
+void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
+ &amdgpu_debugfs_gem_info_fops);
#endif
- return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
new file mode 100644
index 000000000000..b558336bc4c6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_GEM_H__
+#define __AMDGPU_GEM_H__
+
+#include <drm/amdgpu_drm.h>
+#include <drm/drm_gem.h>
+
+/*
+ * GEM.
+ */
+
+#define AMDGPU_GEM_DOMAIN_MAX 0x3
+#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)
+
+extern const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
+
+unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
+
+/*
+ * GEM objects.
+ */
+void amdgpu_gem_force_release(struct amdgpu_device *adev);
+int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
+ int alignment, u32 initial_domain,
+ u64 flags, enum ttm_bo_type type,
+ struct dma_resv *resv,
+ struct drm_gem_object **obj, int8_t xcp_id_plus1);
+int amdgpu_mode_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int amdgpu_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p);
+
+int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int amdgpu_gem_list_handles_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+#define AMDGPU_GEM_CREATE_SETTABLE_MASK (AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | \
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS | \
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC | \
+ AMDGPU_GEM_CREATE_VRAM_CLEARED | \
+ AMDGPU_GEM_CREATE_VM_ALWAYS_VALID | \
+ AMDGPU_GEM_CREATE_EXPLICIT_SYNC | \
+ AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE | \
+ AMDGPU_GEM_CREATE_ENCRYPTED | \
+ AMDGPU_GEM_CREATE_GFX12_DCC | \
+ AMDGPU_GEM_CREATE_DISCARDABLE | \
+ AMDGPU_GEM_CREATE_COHERENT | \
+ AMDGPU_GEM_CREATE_UNCACHED | \
+ AMDGPU_GEM_CREATE_EXT_COHERENT)
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 19943356cca7..ebe2b4c68b0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -22,47 +22,78 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
+#include <linux/firmware.h>
+#include <linux/pm_runtime.h>
+
#include "amdgpu.h"
#include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_xcp.h"
+#include "amdgpu_xgmi.h"
+#include "nvd.h"
+
+/* delay 0.1 second to enable gfx off feature */
+#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
+
+#define GFX_OFF_NO_DELAY 0
/*
- * GPU scratch registers helpers function.
- */
-/**
- * amdgpu_gfx_scratch_get - Allocate a scratch register
- *
- * @adev: amdgpu_device pointer
- * @reg: scratch register mmio offset
- *
- * Allocate a CP scratch register for use by the driver (all asics).
- * Returns 0 on success or -EINVAL on failure.
+ * GPU GFX IP block helper functions.
*/
-int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
+
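+/*
+ * Map a (mec, pipe, queue) tuple to its flat bit index in the MEC queue
+ * bitmap:
+ *   bit = mec * pipes_per_mec * queues_per_pipe + pipe * queues_per_pipe + queue
+ */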
+int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
+ int pipe, int queue)
{
- int i;
+ int bit = 0;
- i = ffs(adev->gfx.scratch.free_mask);
- if (i != 0 && i <= adev->gfx.scratch.num_reg) {
- i--;
- adev->gfx.scratch.free_mask &= ~(1u << i);
- *reg = adev->gfx.scratch.reg_base + i;
- return 0;
- }
- return -EINVAL;
+ bit += mec * adev->gfx.mec.num_pipe_per_mec
+ * adev->gfx.mec.num_queue_per_pipe;
+ bit += pipe * adev->gfx.mec.num_queue_per_pipe;
+ bit += queue;
+
+ return bit;
}
-/**
- * amdgpu_gfx_scratch_free - Free a scratch register
- *
- * @adev: amdgpu_device pointer
- * @reg: scratch register mmio offset
- *
- * Free a CP scratch register allocated for use by the driver (all asics)
- */
-void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
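+/*
+ * Inverse of amdgpu_gfx_mec_queue_to_bit(): decompose a flat queue bit back
+ * into its (mec, pipe, queue) coordinates.
+ */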
+void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
+ int *mec, int *pipe, int *queue)
+{
+ *queue = bit % adev->gfx.mec.num_queue_per_pipe;
+ *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
+ % adev->gfx.mec.num_pipe_per_mec;
+ *mec = (bit / adev->gfx.mec.num_queue_per_pipe)
+ / adev->gfx.mec.num_pipe_per_mec;
+
+}
+
+bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
+ int xcc_id, int mec, int pipe, int queue)
{
- adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base);
+ return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
+ adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
+}
+
+static int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
+ int me, int pipe, int queue)
+{
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
+ int bit = 0;
+
+ bit += me * adev->gfx.me.num_pipe_per_me
+ * num_queue_per_pipe;
+ bit += pipe * num_queue_per_pipe;
+ bit += queue;
+
+ return bit;
+}
+
+bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
+ int me, int pipe, int queue)
+{
+ return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
+ adev->gfx.me.queue_bitmap);
}
/**
@@ -75,9 +106,9 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
* The bitmask of CUs to be disabled in the shader array determined by se and
* sh is stored in mask[se * max_sh + sh].
*/
-void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
+void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se, unsigned int max_sh)
{
- unsigned se, sh, cu;
+ unsigned int se, sh, cu;
const char *p;
memset(mask, 0, sizeof(*mask) * max_se * max_sh);
@@ -89,6 +120,7 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
for (;;) {
char *next;
int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
+
if (ret < 3) {
DRM_ERROR("amdgpu: could not parse disable_cu\n");
return;
@@ -108,3 +140,2348 @@ void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_s
p = next + 1;
}
}
+
+static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev)
+{
+ return amdgpu_async_gfx_ring && adev->gfx.me.num_pipe_per_me > 1;
+}
+
+static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
+{
+ if (amdgpu_compute_multipipe != -1) {
+ dev_info(adev->dev, "amdgpu: forcing compute pipe policy %d\n",
+ amdgpu_compute_multipipe);
+ return amdgpu_compute_multipipe == 1;
+ }
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
+ return true;
+
+ /* FIXME: spreading the queues across pipes causes perf regressions
+ * on POLARIS11 compute workloads */
+ if (adev->asic_type == CHIP_POLARIS11)
+ return false;
+
+ return adev->gfx.mec.num_mec > 1;
+}
+
+bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ int queue = ring->queue;
+ int pipe = ring->pipe;
+
+ /* Policy: use pipe1 queue0 as high priority graphics queue if we
+ * have more than one gfx pipe.
+ */
+ if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
+ adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
+ int me = ring->me;
+ int bit;
+
+ bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
+ if (ring == &adev->gfx.gfx_ring[bit])
+ return true;
+ }
+
+ return false;
+}
+
+bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ /* Policy: use 1st queue as high priority compute queue if we
+ * have more than one compute queue.
+ */
+ if (adev->gfx.num_compute_rings > 1 &&
+ ring == &adev->gfx.compute_ring[0])
+ return true;
+
+ return false;
+}
+
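+/*
+ * Select which compute (MEC) queues the driver owns for each XCC: with the
+ * multipipe policy the queues are spread evenly across the pipes of MEC1,
+ * otherwise the first max_queues_per_mec queue slots are taken in order.
+ */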
+void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
+{
+ int i, j, queue, pipe;
+ bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
+ int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe,
+ adev->gfx.num_compute_rings);
+ int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
+
+ if (multipipe_policy) {
+ /* policy: spread the queues evenly across all pipes on MEC1 only;
+ * for multiple XCCs, just use the original policy for simplicity */
+ for (j = 0; j < num_xcc; j++) {
+ for (i = 0; i < max_queues_per_mec; i++) {
+ pipe = i % adev->gfx.mec.num_pipe_per_mec;
+ queue = (i / adev->gfx.mec.num_pipe_per_mec) %
+ adev->gfx.mec.num_queue_per_pipe;
+
+ set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
+ adev->gfx.mec_bitmap[j].queue_bitmap);
+ }
+ }
+ } else {
+ /* policy: amdgpu owns all queues in the given pipe */
+ for (j = 0; j < num_xcc; j++) {
+ for (i = 0; i < max_queues_per_mec; ++i)
+ set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
+ }
+ }
+
+ for (j = 0; j < num_xcc; j++) {
+ dev_dbg(adev->dev, "mec queue bitmap weight=%d\n",
+ bitmap_weight(adev->gfx.mec_bitmap[j].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
+ }
+}
+
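+/*
+ * Select which graphics (ME) queues the driver owns: the first queue of each
+ * pipe under the multipipe policy, otherwise the queues in order, and then
+ * update num_gfx_rings to match the resulting bitmap weight.
+ */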
+void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
+{
+ int i, queue, pipe;
+ bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
+ int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
+ int max_queues_per_me = adev->gfx.me.num_pipe_per_me * num_queue_per_pipe;
+
+ if (multipipe_policy) {
+ /* policy: amdgpu owns the first queue per pipe at this stage;
+ * this will be extended to multiple queues per pipe later */
+ for (i = 0; i < max_queues_per_me; i++) {
+ pipe = i % adev->gfx.me.num_pipe_per_me;
+ queue = (i / adev->gfx.me.num_pipe_per_me) %
+ num_queue_per_pipe;
+
+ set_bit(pipe * num_queue_per_pipe + queue,
+ adev->gfx.me.queue_bitmap);
+ }
+ } else {
+ for (i = 0; i < max_queues_per_me; ++i)
+ set_bit(i, adev->gfx.me.queue_bitmap);
+ }
+
+ /* update the number of active graphics rings */
+ if (adev->gfx.num_gfx_rings)
+ adev->gfx.num_gfx_rings =
+ bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
+}
+
+static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring, int xcc_id)
+{
+ int queue_bit;
+ int mec, pipe, queue;
+
+ queue_bit = adev->gfx.mec.num_mec
+ * adev->gfx.mec.num_pipe_per_mec
+ * adev->gfx.mec.num_queue_per_pipe;
+
+ while (--queue_bit >= 0) {
+ if (test_bit(queue_bit, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
+ continue;
+
+ amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
+
+ /*
+ * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
+ * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
+ * only can be issued on queue 0.
+ */
+ if ((mec == 1 && pipe > 1) || queue != 0)
+ continue;
+
+ ring->me = mec + 1;
+ ring->pipe = pipe;
+ ring->queue = queue;
+
+ return 0;
+ }
+
+ dev_err(adev->dev, "Failed to find a queue for KIQ\n");
+ return -EINVAL;
+}
+
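+/*
+ * Set up the KIQ ring for the given XCC: initialize the ring and its lock,
+ * compute the doorbell index, pick a free MEC queue 0 slot via
+ * amdgpu_gfx_kiq_acquire() and then initialize the ring itself.
+ */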
+int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_irq_src *irq = &kiq->irq;
+ struct amdgpu_ring *ring = &kiq->ring;
+ int r = 0;
+
+ spin_lock_init(&kiq->ring_lock);
+
+ ring->adev = NULL;
+ ring->ring_obj = NULL;
+ ring->use_doorbell = true;
+ ring->xcc_id = xcc_id;
+ ring->vm_hub = AMDGPU_GFXHUB(xcc_id);
+ ring->doorbell_index =
+ (adev->doorbell_index.kiq +
+ xcc_id * adev->doorbell_index.xcc_doorbell_range)
+ << 1;
+
+ r = amdgpu_gfx_kiq_acquire(adev, ring, xcc_id);
+ if (r)
+ return r;
+
+ ring->eop_gpu_addr = kiq->eop_gpu_addr;
+ ring->no_scheduler = true;
+ snprintf(ring->name, sizeof(ring->name), "kiq_%hhu.%hhu.%hhu.%hhu",
+ (unsigned char)xcc_id, (unsigned char)ring->me,
+ (unsigned char)ring->pipe, (unsigned char)ring->queue);
+ r = amdgpu_ring_init(adev, ring, 1024, irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (r)
+ dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
+
+ return r;
+}
+
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_fini(ring);
+}
+
+void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+
+ amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
+}
+
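+/*
+ * Allocate and zero the KIQ EOP buffer (hpd_size bytes in GTT) for the given
+ * XCC instance.
+ */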
+int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
+ unsigned int hpd_size, int xcc_id)
+{
+ int r;
+ u32 *hpd;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+
+ r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
+ &kiq->eop_gpu_addr, (void **)&hpd);
+ if (r) {
+ dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
+ return r;
+ }
+
+ memset(hpd, 0, hpd_size);
+
+ r = amdgpu_bo_reserve(kiq->eop_obj, true);
+ if (unlikely(r != 0))
+ dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
+ amdgpu_bo_kunmap(kiq->eop_obj);
+ amdgpu_bo_unreserve(kiq->eop_obj);
+
+ return 0;
+}
+
+/* create MQD for each compute/gfx queue */
+int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
+ unsigned int mqd_size, int xcc_id)
+{
+ int r, i, j;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *ring = &kiq->ring;
+ u32 domain = AMDGPU_GEM_DOMAIN_GTT;
+
+#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
+ /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
+ domain |= AMDGPU_GEM_DOMAIN_VRAM;
+#endif
+
+ /* create MQD for KIQ */
+ if (!adev->enable_mes_kiq && !ring->mqd_obj) {
+ /* originally the KIQ MQD was put in the GTT domain, but for SRIOV the VRAM
+ * domain is a must; otherwise the hypervisor triggers a SAVE_VF failure after
+ * the driver is unloaded, which means the MQD has been deallocated and
+ * gart_unbind has run. To avoid that divergence, use the VRAM domain for the
+ * KIQ MQD regardless of SRIOV or bare-metal.
+ */
+ r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &ring->mqd_obj,
+ &ring->mqd_gpu_addr,
+ &ring->mqd_ptr);
+ if (r) {
+ dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
+ return r;
+ }
+
+ /* prepare MQD backup */
+ kiq->mqd_backup = kzalloc(mqd_size, GFP_KERNEL);
+ if (!kiq->mqd_backup) {
+ dev_warn(adev->dev,
+ "no memory to create MQD backup for ring %s\n", ring->name);
+ return -ENOMEM;
+ }
+ }
+
+ if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
+ /* create MQD for each KGQ */
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (!ring->mqd_obj) {
+ r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+ domain, &ring->mqd_obj,
+ &ring->mqd_gpu_addr, &ring->mqd_ptr);
+ if (r) {
+ dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
+ return r;
+ }
+
+ ring->mqd_size = mqd_size;
+ /* prepare MQD backup */
+ adev->gfx.me.mqd_backup[i] = kzalloc(mqd_size, GFP_KERNEL);
+ if (!adev->gfx.me.mqd_backup[i]) {
+ dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+ return -ENOMEM;
+ }
+ }
+ }
+ }
+
+ /* create MQD for each KCQ */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ ring = &adev->gfx.compute_ring[j];
+ if (!ring->mqd_obj) {
+ r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
+ domain, &ring->mqd_obj,
+ &ring->mqd_gpu_addr, &ring->mqd_ptr);
+ if (r) {
+ dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
+ return r;
+ }
+
+ ring->mqd_size = mqd_size;
+ /* prepare MQD backup */
+ adev->gfx.mec.mqd_backup[j] = kzalloc(mqd_size, GFP_KERNEL);
+ if (!adev->gfx.mec.mqd_backup[j]) {
+ dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
+ return -ENOMEM;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_ring *ring = NULL;
+ int i, j;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+
+ if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ ring = &adev->gfx.gfx_ring[i];
+ kfree(adev->gfx.me.mqd_backup[i]);
+ amdgpu_bo_free_kernel(&ring->mqd_obj,
+ &ring->mqd_gpu_addr,
+ &ring->mqd_ptr);
+ }
+ }
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ ring = &adev->gfx.compute_ring[j];
+ kfree(adev->gfx.mec.mqd_backup[j]);
+ amdgpu_bo_free_kernel(&ring->mqd_obj,
+ &ring->mqd_gpu_addr,
+ &ring->mqd_ptr);
+ }
+
+ ring = &kiq->ring;
+ kfree(kiq->mqd_backup);
+ amdgpu_bo_free_kernel(&ring->mqd_obj,
+ &ring->mqd_gpu_addr,
+ &ring->mqd_ptr);
+}
+
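+/*
+ * Unmap all kernel compute queues (KCQs) of the given XCC. With MES enabled
+ * the queues are unmapped through MES; otherwise a RESET_QUEUES unmap packet
+ * is submitted for each compute ring on the KIQ and a ring test confirms that
+ * the packets were processed.
+ */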
+int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ int i, r = 0;
+ int j;
+
+ if (adev->enable_mes) {
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ amdgpu_mes_unmap_legacy_queue(adev,
+ &adev->gfx.compute_ring[j],
+ RESET_QUEUES, 0, 0);
+ }
+ return 0;
+ }
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
+ return 0;
+
+ spin_lock(&kiq->ring_lock);
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
+ adev->gfx.num_compute_rings)) {
+ spin_unlock(&kiq->ring_lock);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ kiq->pmf->kiq_unmap_queues(kiq_ring,
+ &adev->gfx.compute_ring[j],
+ RESET_QUEUES, 0, 0);
+ }
+ /* Submit unmap queue packet */
+ amdgpu_ring_commit(kiq_ring);
+ /*
+ * Ring test will do a basic scratch register change check. Just run
+ * this to ensure that the unmap queues packet submitted above was
+ * processed successfully before returning.
+ */
+ r = amdgpu_ring_test_helper(kiq_ring);
+
+ spin_unlock(&kiq->ring_lock);
+
+ return r;
+}
+
+int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ int i, r = 0;
+ int j;
+
+ if (adev->enable_mes) {
+ if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_gfx_rings;
+ amdgpu_mes_unmap_legacy_queue(adev,
+ &adev->gfx.gfx_ring[j],
+ PREEMPT_QUEUES, 0, 0);
+ }
+ }
+ return 0;
+ }
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ if (!adev->gfx.kiq[0].ring.sched.ready || amdgpu_in_reset(adev))
+ return 0;
+
+ if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+ spin_lock(&kiq->ring_lock);
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
+ adev->gfx.num_gfx_rings)) {
+ spin_unlock(&kiq->ring_lock);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_gfx_rings;
+ kiq->pmf->kiq_unmap_queues(kiq_ring,
+ &adev->gfx.gfx_ring[j],
+ PREEMPT_QUEUES, 0, 0);
+ }
+ /* Submit unmap queue packet */
+ amdgpu_ring_commit(kiq_ring);
+
+ /*
+ * Ring test will do a basic scratch register change check.
+ * Just run this to ensure that the unmap queues packet submitted
+ * above was processed successfully before returning.
+ */
+ r = amdgpu_ring_test_helper(kiq_ring);
+ spin_unlock(&kiq->ring_lock);
+ }
+
+ return r;
+}
+
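+/*
+ * Convert a queue bit from the driver's MEC queue bitmap layout into the bit
+ * position expected by the CP SET_RESOURCES packet, which assumes 4 pipes per
+ * MEC and 8 queues per pipe.
+ */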
+int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
+ int queue_bit)
+{
+ int mec, pipe, queue;
+ int set_resource_bit = 0;
+
+ amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
+
+ set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
+
+ return set_resource_bit;
+}
+
+static int amdgpu_gfx_mes_enable_kcq(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ uint64_t queue_mask = ~0ULL;
+ int r, i, j;
+
+ amdgpu_device_flush_hdp(adev, NULL);
+
+ if (!adev->enable_uni_mes) {
+ spin_lock(&kiq->ring_lock);
+ r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->set_resources_size);
+ if (r) {
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
+ spin_unlock(&kiq->ring_lock);
+ return r;
+ }
+
+ kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
+ r = amdgpu_ring_test_helper(kiq_ring);
+ spin_unlock(&kiq->ring_lock);
+ if (r)
+ dev_err(adev->dev, "KIQ failed to set resources\n");
+ }
+
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ r = amdgpu_mes_map_legacy_queue(adev,
+ &adev->gfx.compute_ring[j]);
+ if (r) {
+ dev_err(adev->dev, "failed to map compute queue\n");
+ return r;
+ }
+ }
+
+ return 0;
+}
+
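+/*
+ * Map all kernel compute queues (KCQs) of the given XCC: build the queue mask
+ * from the owned MEC queue bitmap, then submit a SET_RESOURCES packet plus one
+ * map packet per compute ring on the KIQ (or defer to MES when legacy queue
+ * mapping is enabled).
+ */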
+int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ uint64_t queue_mask = 0;
+ int r, i, j;
+
+ if (adev->mes.enable_legacy_queue_map)
+ return amdgpu_gfx_mes_enable_kcq(adev, xcc_id);
+
+ if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
+ return -EINVAL;
+
+ for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
+ if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
+ continue;
+
+ /* This situation may be hit in the future if a new HW
+ * generation exposes more than 64 queues. If so, the
+ * definition of queue_mask needs updating */
+ if (WARN_ON(i > (sizeof(queue_mask)*8))) {
+ dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i);
+ break;
+ }
+
+ queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
+ }
+
+ amdgpu_device_flush_hdp(adev, NULL);
+
+ dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me,
+ kiq_ring->pipe, kiq_ring->queue);
+
+ spin_lock(&kiq->ring_lock);
+ r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
+ adev->gfx.num_compute_rings +
+ kiq->pmf->set_resources_size);
+ if (r) {
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
+ spin_unlock(&kiq->ring_lock);
+ return r;
+ }
+
+ kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_compute_rings;
+ kiq->pmf->kiq_map_queues(kiq_ring,
+ &adev->gfx.compute_ring[j]);
+ }
+ /* Submit map queue packet */
+ amdgpu_ring_commit(kiq_ring);
+ /*
+ * Ring test will do a basic scratch register change check. Just run
+ * this to ensure that the map queues packet submitted above was
+ * processed successfully before returning.
+ */
+ r = amdgpu_ring_test_helper(kiq_ring);
+ spin_unlock(&kiq->ring_lock);
+ if (r)
+ dev_err(adev->dev, "KCQ enable failed\n");
+
+ return r;
+}
+
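+/*
+ * Map all kernel graphics queues (KGQs) of the given XCC, either through MES
+ * legacy queue mapping or by submitting KIQ map packets (only on the master
+ * XCC).
+ */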
+int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ int r, i, j;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
+ return -EINVAL;
+
+ amdgpu_device_flush_hdp(adev, NULL);
+
+ if (adev->mes.enable_legacy_queue_map) {
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_gfx_rings;
+ r = amdgpu_mes_map_legacy_queue(adev,
+ &adev->gfx.gfx_ring[j]);
+ if (r) {
+ dev_err(adev->dev, "failed to map gfx queue\n");
+ return r;
+ }
+ }
+
+ return 0;
+ }
+
+ spin_lock(&kiq->ring_lock);
+ /* No need to map kcq on the slave */
+ if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+ r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
+ adev->gfx.num_gfx_rings);
+ if (r) {
+ dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
+ spin_unlock(&kiq->ring_lock);
+ return r;
+ }
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+ j = i + xcc_id * adev->gfx.num_gfx_rings;
+ kiq->pmf->kiq_map_queues(kiq_ring,
+ &adev->gfx.gfx_ring[j]);
+ }
+ }
+ /* Submit map queue packet */
+ amdgpu_ring_commit(kiq_ring);
+ /*
+ * Ring test will do a basic scratch register change check. Just run
+ * this to ensure that the map queues packet submitted above was
+ * processed successfully before returning.
+ */
+ r = amdgpu_ring_test_helper(kiq_ring);
+ spin_unlock(&kiq->ring_lock);
+ if (r)
+ dev_err(adev->dev, "KGQ enable failed\n");
+
+ return r;
+}
+
+static void amdgpu_gfx_do_off_ctrl(struct amdgpu_device *adev, bool enable,
+ bool no_delay)
+{
+ unsigned long delay = GFX_OFF_DELAY_ENABLE;
+
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+ return;
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ if (enable) {
+ /* If the count is already 0, it means there's an imbalance bug somewhere.
+ * Note that the bug may be in a different caller than the one which triggers the
+ * WARN_ON_ONCE.
+ */
+ if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
+ goto unlock;
+
+ adev->gfx.gfx_off_req_count--;
+
+ if (adev->gfx.gfx_off_req_count == 0 &&
+ !adev->gfx.gfx_off_state) {
+ /* If going to s2idle, no need to wait */
+ if (no_delay) {
+ if (!amdgpu_dpm_set_powergating_by_smu(adev,
+ AMD_IP_BLOCK_TYPE_GFX, true, 0))
+ adev->gfx.gfx_off_state = true;
+ } else {
+ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ delay);
+ }
+ }
+ } else {
+ if (adev->gfx.gfx_off_req_count == 0) {
+ cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+
+ if (adev->gfx.gfx_off_state &&
+ !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false, 0)) {
+ adev->gfx.gfx_off_state = false;
+
+ if (adev->gfx.funcs->init_spm_golden) {
+ dev_dbg(adev->dev,
+ "GFXOFF is disabled, re-init SPM golden settings\n");
+ amdgpu_gfx_init_spm_golden(adev);
+ }
+ }
+ }
+
+ adev->gfx.gfx_off_req_count++;
+ }
+
+unlock:
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+}
+
+/* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: true to enable the gfx off feature, false to disable it
+ *
+ * 1. The gfx off feature will be enabled by the GFX IP after GFX CG/PG is enabled.
+ * 2. Other clients can send requests to disable the gfx off feature; such requests should be honored.
+ * 3. Other clients can cancel their requests to disable the gfx off feature.
+ * 4. Other clients should not request enabling the gfx off feature before they have requested disabling it.
+ *
+ * Allowing gfx off will be delayed by GFX_OFF_DELAY_ENABLE ms.
+ */
+void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+{
+ /* If going to s2idle, no need to wait */
+ bool no_delay = adev->in_s0ix ? true : false;
+
+ amdgpu_gfx_do_off_ctrl(adev, enable, no_delay);
+}
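+/*
+ * Illustrative usage sketch (hypothetical caller, not code from this file):
+ * requests must stay balanced, e.g.
+ *
+ *   amdgpu_gfx_off_ctrl(adev, false);   // block GFXOFF around register access
+ *   ...touch GFX registers...
+ *   amdgpu_gfx_off_ctrl(adev, true);    // drop the request again
+ */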
+
+/* amdgpu_gfx_off_ctrl_immediate - Handle gfx off feature enable/disable
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: true to enable the gfx off feature, false to disable it
+ *
+ * 1. The gfx off feature will be enabled by the GFX IP after GFX CG/PG is enabled.
+ * 2. Other clients can send requests to disable the gfx off feature; such requests should be honored.
+ * 3. Other clients can cancel their requests to disable the gfx off feature.
+ * 4. Other clients should not request enabling the gfx off feature before they have requested disabling it.
+ *
+ * Allowing gfx off will be issued immediately.
+ */
+void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable)
+{
+ amdgpu_gfx_do_off_ctrl(adev, enable, true);
+}
+
+int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
+{
+ int r = 0;
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ r = amdgpu_dpm_set_residency_gfxoff(adev, value);
+
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+ return r;
+}
+
+int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
+{
+ int r = 0;
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ r = amdgpu_dpm_get_residency_gfxoff(adev, value);
+
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+ return r;
+}
+
+int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
+{
+ int r = 0;
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);
+
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+ return r;
+}
+
+int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
+{
+
+ int r = 0;
+
+ mutex_lock(&adev->gfx.gfx_off_mutex);
+
+ r = amdgpu_dpm_get_status_gfxoff(adev, value);
+
+ mutex_unlock(&adev->gfx.gfx_off_mutex);
+
+ return r;
+}
+
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+ if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
+ r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ if (amdgpu_sriov_vf(adev))
+ return r;
+
+ if (adev->gfx.cp_ecc_error_irq.funcs) {
+ r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+ } else {
+ amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
+ }
+
+ return 0;
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+ return r;
+}
+
+int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err = 0;
+ struct amdgpu_gfx_ras *ras = NULL;
+
+ /* adev->gfx.ras is NULL, which means gfx does not
+ * support ras function, then do nothing here.
+ */
+ if (!adev->gfx.ras)
+ return 0;
+
+ ras = adev->gfx.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register gfx ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "gfx");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gfx.ras_if = &ras->ras_block.ras_comm;
+
+ /* If no special ras_late_init function is defined, use the gfx default ras_late_init */
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
+
+ /* If no special ras_cb function is defined, use the default ras_cb */
+ if (!ras->ras_block.ras_cb)
+ ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
+
+ return 0;
+}
+
+int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
+ return adev->gfx.ras->poison_consumption_handler(adev, entry);
+
+ return 0;
+}
+
+int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry)
+{
+ /* TODO: a UE will trigger an interrupt.
+ *
+ * When "Full RAS" is enabled, the per-IP interrupt sources should
+ * be disabled and the driver should only look for the aggregated
+ * interrupt via sync flood.
+ */
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
+ adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
+ adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
+ amdgpu_ras_reset_gpu(adev);
+ }
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ dev_err(adev->dev, "CP ECC ERROR IRQ\n");
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
+
+void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
+ void *ras_error_status,
+ void (*func)(struct amdgpu_device *adev, void *ras_error_status,
+ int xcc_id))
+{
+ int i;
+ int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
+ uint32_t xcc_mask = GENMASK(num_xcc - 1, 0);
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ if (err_data) {
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+ }
+
+ for_each_inst(i, xcc_mask)
+ func(adev, ras_error_status, i);
+}
+
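+/*
+ * Read a register through the KIQ (or through MES when its ring is ready):
+ * emit an rreg packet that writes the value to a writeback slot, then poll the
+ * fence until it lands, with bail-out paths for GPU reset and IRQ context.
+ */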
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq, reg_val_offs = 0, value = 0;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
+ if (adev->mes.ring[0].sched.ready)
+ return amdgpu_mes_rreg(adev, reg);
+
+ BUG_ON(!ring->funcs->emit_rreg);
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
+ pr_err("critical bug! too many kiq readers\n");
+ goto failed_unlock;
+ }
+ r = amdgpu_ring_alloc(ring, 32);
+ if (r)
+ goto failed_unlock;
+
+ amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* Don't keep waiting in the GPU-reset case, because doing so may
+ * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
+ * can be triggered from TTM, and ttm_bo_lock_delayed_workqueue() will
+ * never return if we keep waiting in virt_kiq_rreg, which makes
+ * gpu_recover() hang there.
+ *
+ * Also don't keep waiting when called from IRQ context.
+ */
+ if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
+ goto failed_kiq_read;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_read;
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq_read;
+
+ mb();
+ value = adev->wb.wb[reg_val_offs];
+ amdgpu_device_wb_free(adev, reg_val_offs);
+ return value;
+
+failed_undo:
+ amdgpu_ring_undo(ring);
+failed_unlock:
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+failed_kiq_read:
+ if (reg_val_offs)
+ amdgpu_device_wb_free(adev, reg_val_offs);
+ dev_err(adev->dev, "failed to read reg:%x\n", reg);
+ return ~0;
+}
+
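+/*
+ * Write a register through the KIQ (or through MES when its ring is ready):
+ * emit a wreg packet and poll the fence until it is processed, with bail-out
+ * paths for GPU reset and IRQ context.
+ */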
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ BUG_ON(!ring->funcs->emit_wreg);
+
+ if (amdgpu_device_skip_hw_access(adev))
+ return;
+
+ if (adev->mes.ring[0].sched.ready) {
+ amdgpu_mes_wreg(adev, reg, v);
+ return;
+ }
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ r = amdgpu_ring_alloc(ring, 32);
+ if (r)
+ goto failed_unlock;
+
+ amdgpu_ring_emit_wreg(ring, reg, v);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* Don't keep waiting in the GPU-reset case, because doing so may
+ * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
+ * can be triggered from TTM, and ttm_bo_lock_delayed_workqueue() will
+ * never return if we keep waiting in virt_kiq_rreg, which makes
+ * gpu_recover() hang there.
+ *
+ * Also don't keep waiting when called from IRQ context.
+ */
+ if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
+ goto failed_kiq_write;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ if (amdgpu_in_reset(adev))
+ goto failed_kiq_write;
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq_write;
+
+ return;
+
+failed_undo:
+ amdgpu_ring_undo(ring);
+failed_unlock:
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+failed_kiq_write:
+ dev_err(adev->dev, "failed to write reg:%x\n", reg);
+}
+
+int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
+{
+ if (amdgpu_num_kcq == -1) {
+ return 8;
+ } else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
+ dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
+ return 8;
+ }
+ return amdgpu_num_kcq;
+}
+
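+/*
+ * Record the CP firmware version/feature version and size for the given
+ * ucode_id and, when PSP front-door loading is used, register the firmware
+ * blob in adev->firmware.ucode[] so it can be uploaded.
+ */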
+void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev,
+ uint32_t ucode_id)
+{
+ const struct gfx_firmware_header_v1_0 *cp_hdr;
+ const struct gfx_firmware_header_v2_0 *cp_hdr_v2_0;
+ struct amdgpu_firmware_info *info = NULL;
+ const struct firmware *ucode_fw;
+ unsigned int fw_size;
+
+ switch (ucode_id) {
+ case AMDGPU_UCODE_ID_CP_PFP:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.pfp_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.pfp_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.pfp_fw->data;
+ adev->gfx.pfp_fw_version =
+ le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
+ adev->gfx.pfp_feature_version =
+ le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
+ ucode_fw = adev->gfx.pfp_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.pfp_fw->data;
+ ucode_fw = adev->gfx.pfp_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_ME:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.me_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.me_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.me_fw->data;
+ adev->gfx.me_fw_version =
+ le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
+ adev->gfx.me_feature_version =
+ le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
+ ucode_fw = adev->gfx.me_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.me_fw->data;
+ ucode_fw = adev->gfx.me_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_CE:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.ce_fw->data;
+ adev->gfx.ce_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.ce_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.ce_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
+ le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1_JT:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec_fw->data;
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC2:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ ucode_fw = adev->gfx.mec2_fw;
+ fw_size = le32_to_cpu(cp_hdr->header.ucode_size_bytes) -
+ le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC2_JT:
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ ucode_fw = adev->gfx.mec2_fw;
+ fw_size = le32_to_cpu(cp_hdr->jt_size) * 4;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.mec_fw->data;
+ adev->gfx.mec_fw_version =
+ le32_to_cpu(cp_hdr_v2_0->header.ucode_version);
+ adev->gfx.mec_feature_version =
+ le32_to_cpu(cp_hdr_v2_0->ucode_feature_version);
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
+ cp_hdr_v2_0 = (const struct gfx_firmware_header_v2_0 *)
+ adev->gfx.mec_fw->data;
+ ucode_fw = adev->gfx.mec_fw;
+ fw_size = le32_to_cpu(cp_hdr_v2_0->data_size_bytes);
+ break;
+ default:
+ dev_err(adev->dev, "Invalid ucode id %u\n", ucode_id);
+ return;
+ }
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ info = &adev->firmware.ucode[ucode_id];
+ info->ucode_id = ucode_id;
+ info->fw = ucode_fw;
+ adev->firmware.fw_size += ALIGN(fw_size, PAGE_SIZE);
+ }
+}
+
+bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id)
+{
+ return !(xcc_id % (adev->gfx.num_xcc_per_xcp ?
+ adev->gfx.num_xcc_per_xcp : 1));
+}
+
+static ssize_t amdgpu_gfx_get_current_compute_partition(struct device *dev,
+ struct device_attribute *addr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int mode;
+
+ /* Only minimal precaution is taken to reject requests while in reset. */
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+ AMDGPU_XCP_FL_NONE);
+
+ return sysfs_emit(buf, "%s\n", amdgpu_gfx_compute_mode_desc(mode));
+}
+
+static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev,
+ struct device_attribute *addr,
+ const char *buf, size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ enum amdgpu_gfx_partition mode;
+ int ret = 0, num_xcc;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ if (num_xcc % 2 != 0)
+ return -EINVAL;
+
+ if (!strncasecmp("SPX", buf, strlen("SPX"))) {
+ mode = AMDGPU_SPX_PARTITION_MODE;
+ } else if (!strncasecmp("DPX", buf, strlen("DPX"))) {
+ /*
+ * DPX mode needs the number of AIDs to be a multiple of 2.
+ * Each AID connects 2 XCCs.
+ */
+ if (num_xcc % 4)
+ return -EINVAL;
+ mode = AMDGPU_DPX_PARTITION_MODE;
+ } else if (!strncasecmp("TPX", buf, strlen("TPX"))) {
+ if (num_xcc != 6)
+ return -EINVAL;
+ mode = AMDGPU_TPX_PARTITION_MODE;
+ } else if (!strncasecmp("QPX", buf, strlen("QPX"))) {
+ if (num_xcc != 8)
+ return -EINVAL;
+ mode = AMDGPU_QPX_PARTITION_MODE;
+ } else if (!strncasecmp("CPX", buf, strlen("CPX"))) {
+ mode = AMDGPU_CPX_PARTITION_MODE;
+ } else {
+ return -EINVAL;
+ }
+
+ /* Don't allow a switch while under reset */
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return -EPERM;
+
+ ret = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr, mode);
+
+ up_read(&adev->reset_domain->sem);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const char *xcp_desc[] = {
+ [AMDGPU_SPX_PARTITION_MODE] = "SPX",
+ [AMDGPU_DPX_PARTITION_MODE] = "DPX",
+ [AMDGPU_TPX_PARTITION_MODE] = "TPX",
+ [AMDGPU_QPX_PARTITION_MODE] = "QPX",
+ [AMDGPU_CPX_PARTITION_MODE] = "CPX",
+};
+
+static ssize_t amdgpu_gfx_get_available_compute_partition(struct device *dev,
+ struct device_attribute *addr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+ int size = 0, mode;
+ char *sep = "";
+
+ if (!xcp_mgr || !xcp_mgr->avail_xcp_modes)
+ return sysfs_emit(buf, "Not supported\n");
+
+ for_each_inst(mode, xcp_mgr->avail_xcp_modes) {
+ size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
+ sep = ", ";
+ }
+
+ size += sysfs_emit_at(buf, size, "\n");
+
+ return size;
+}
+
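+/*
+ * Submit a single NOP-filled IB on @ring with job->run_cleaner_shader set,
+ * using a fresh dummy owner each time so the cleaner shader is executed on
+ * every submission, and wait for the resulting fence before returning.
+ */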
+static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct drm_gpu_scheduler *sched = &ring->sched;
+ struct drm_sched_entity entity;
+ static atomic_t counter;
+ struct dma_fence *f;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ void *owner;
+ int i, r;
+
+ /* Initialize the scheduler entity */
+ r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+ if (r) {
+ dev_err(adev->dev, "Failed setting up GFX kernel entity.\n");
+ goto err;
+ }
+
+ /*
+ * Use some unique dummy value as the owner to make sure we execute
+ * the cleaner shader on each submission. The value just needs to change
+ * for each submission and is otherwise meaningless.
+ */
+ owner = (void *)(unsigned long)atomic_inc_return(&counter);
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
+ 64, 0, &job,
+ AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER);
+ if (r)
+ goto err;
+
+ job->enforce_isolation = true;
+ /* always run the cleaner shader */
+ job->run_cleaner_shader = true;
+
+ ib = &job->ibs[0];
+ for (i = 0; i <= ring->funcs->align_mask; ++i)
+ ib->ptr[i] = ring->funcs->nop;
+ ib->length_dw = ring->funcs->align_mask + 1;
+
+ f = amdgpu_job_submit(job);
+
+ r = dma_fence_wait(f, false);
+ if (r)
+ goto err;
+
+ dma_fence_put(f);
+
+ /* Clean up the scheduler entity */
+ drm_sched_entity_destroy(&entity);
+ return 0;
+
+err:
+ return r;
+}
+
+static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
+{
+ int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ struct amdgpu_ring *ring;
+ int num_xcc_to_clear;
+ int i, r, xcc_id;
+
+ if (adev->gfx.num_xcc_per_xcp)
+ num_xcc_to_clear = adev->gfx.num_xcc_per_xcp;
+ else
+ num_xcc_to_clear = 1;
+
+ for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
+ if ((ring->xcp_id == xcp_id) && ring->sched.ready) {
+ r = amdgpu_gfx_run_cleaner_shader_job(ring);
+ if (r)
+ return r;
+ num_xcc_to_clear--;
+ break;
+ }
+ }
+ }
+
+ if (num_xcc_to_clear)
+ return -ENOENT;
+
+ return 0;
+}
+
+/**
+ * amdgpu_gfx_set_run_cleaner_shader - Execute the AMDGPU GFX Cleaner Shader
+ * @dev: The device structure
+ * @attr: The device attribute structure
+ * @buf: The buffer containing the input data
+ * @count: The size of the input data
+ *
+ * Provides the sysfs interface to manually run a cleaner shader, which is
+ * used to clear the GPU state between different tasks. Writing a value to the
+ * 'run_cleaner_shader' sysfs file triggers the cleaner shader execution.
+ * The value written corresponds to the partition index on multi-partition
+ * devices. On single-partition devices, the value should be '0'.
+ *
+ * The cleaner shader clears the Local Data Store (LDS) and General Purpose
+ * Registers (GPRs) to ensure data isolation between GPU workloads.
+ *
+ * Return: The number of bytes written to the sysfs file.
+ */
+static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int ret;
+ long value;
+
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+ if (adev->in_suspend && !adev->in_runpm)
+ return -EPERM;
+
+ if (adev->gfx.disable_kq)
+ return -EPERM;
+
+ ret = kstrtol(buf, 0, &value);
+
+ if (ret)
+ return -EINVAL;
+
+ if (value < 0)
+ return -EINVAL;
+
+ if (adev->xcp_mgr) {
+ if (value >= adev->xcp_mgr->num_xcps)
+ return -EINVAL;
+ } else {
+ if (value > 1)
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ ret = amdgpu_gfx_run_cleaner_shader(adev, value);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+/**
+ * amdgpu_gfx_get_enforce_isolation - Query AMDGPU GFX Enforce Isolation Settings
+ * @dev: The device structure
+ * @attr: The device attribute structure
+ * @buf: The buffer to store the output data
+ *
+ * Provides the sysfs read interface to get the current settings of the 'enforce_isolation'
+ * feature for each GPU partition. Reading from the 'enforce_isolation'
+ * sysfs file returns the isolation settings for all partitions, where '0'
+ * indicates disabled, '1' indicates enabled, '2' indicates enabled in legacy mode,
+ * and '3' indicates enabled without the cleaner shader.
+ *
+ * Return: The number of bytes read from the sysfs file.
+ */
+static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int i;
+ ssize_t size = 0;
+
+ if (adev->xcp_mgr) {
+ for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
+ size += sysfs_emit_at(buf, size, "%u", adev->enforce_isolation[i]);
+ if (i < (adev->xcp_mgr->num_xcps - 1))
+ size += sysfs_emit_at(buf, size, " ");
+ }
+ buf[size++] = '\n';
+ } else {
+ size = sysfs_emit_at(buf, 0, "%u\n", adev->enforce_isolation[0]);
+ }
+
+ return size;
+}
+
+/**
+ * amdgpu_gfx_set_enforce_isolation - Control AMDGPU GFX Enforce Isolation
+ * @dev: The device structure
+ * @attr: The device attribute structure
+ * @buf: The buffer containing the input data
+ * @count: The size of the input data
+ *
+ * This function allows control over the 'enforce_isolation' feature, which
+ * serializes access to the graphics engine. Writing '0' to disable, '1' to
+ * enable isolation with cleaner shader, '2' to enable legacy isolation without
+ * cleaner shader, or '3' to enable process isolation without submitting the
+ * cleaner shader to the 'enforce_isolation' sysfs file sets the isolation mode
+ * for each partition. The input should specify the setting for all
+ * partitions.
+ *
+ * Return: The number of bytes written to the sysfs file.
+ */
+static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ long partition_values[MAX_XCP] = {0};
+ int ret, i, num_partitions;
+ const char *input_buf = buf;
+
+ for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+ ret = sscanf(input_buf, "%ld", &partition_values[i]);
+ if (ret <= 0)
+ break;
+
+ /* Move the pointer to the next value in the string */
+ input_buf = strchr(input_buf, ' ');
+ if (input_buf) {
+ input_buf++;
+ } else {
+ i++;
+ break;
+ }
+ }
+ num_partitions = i;
+
+ if (adev->xcp_mgr && num_partitions != adev->xcp_mgr->num_xcps)
+ return -EINVAL;
+
+ if (!adev->xcp_mgr && num_partitions != 1)
+ return -EINVAL;
+
+ for (i = 0; i < num_partitions; i++) {
+ if (partition_values[i] != 0 &&
+ partition_values[i] != 1 &&
+ partition_values[i] != 2 &&
+ partition_values[i] != 3)
+ return -EINVAL;
+ }
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+ for (i = 0; i < num_partitions; i++) {
+ switch (partition_values[i]) {
+ case 0:
+ default:
+ adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
+ break;
+ case 1:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE;
+ break;
+ case 2:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
+ break;
+ case 3:
+ adev->enforce_isolation[i] =
+ AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
+ break;
+ }
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+
+ amdgpu_mes_update_enforce_isolation(adev);
+
+ return count;
+}
+
+static ssize_t amdgpu_gfx_get_gfx_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->gfx.gfx_supported_reset);
+}
+
+static ssize_t amdgpu_gfx_get_compute_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->gfx.compute_supported_reset);
+}
+
+static DEVICE_ATTR(run_cleaner_shader, 0200,
+ NULL, amdgpu_gfx_set_run_cleaner_shader);
+
+static DEVICE_ATTR(enforce_isolation, 0644,
+ amdgpu_gfx_get_enforce_isolation,
+ amdgpu_gfx_set_enforce_isolation);
+
+static DEVICE_ATTR(current_compute_partition, 0644,
+ amdgpu_gfx_get_current_compute_partition,
+ amdgpu_gfx_set_compute_partition);
+
+static DEVICE_ATTR(available_compute_partition, 0444,
+ amdgpu_gfx_get_available_compute_partition, NULL);
+static DEVICE_ATTR(gfx_reset_mask, 0444,
+ amdgpu_gfx_get_gfx_reset_mask, NULL);
+
+static DEVICE_ATTR(compute_reset_mask, 0444,
+ amdgpu_gfx_get_compute_reset_mask, NULL);
+
+static int amdgpu_gfx_sysfs_xcp_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+ bool xcp_switch_supported;
+ int r;
+
+ if (!xcp_mgr)
+ return 0;
+
+ xcp_switch_supported =
+ (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
+
+ if (!xcp_switch_supported)
+ dev_attr_current_compute_partition.attr.mode &=
+ ~(S_IWUSR | S_IWGRP | S_IWOTH);
+
+ r = device_create_file(adev->dev, &dev_attr_current_compute_partition);
+ if (r)
+ return r;
+
+ if (xcp_switch_supported)
+ r = device_create_file(adev->dev,
+ &dev_attr_available_compute_partition);
+
+ return r;
+}
+
+static void amdgpu_gfx_sysfs_xcp_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+ bool xcp_switch_supported;
+
+ if (!xcp_mgr)
+ return;
+
+ xcp_switch_supported =
+ (xcp_mgr->funcs && xcp_mgr->funcs->switch_partition_mode);
+ device_remove_file(adev->dev, &dev_attr_current_compute_partition);
+
+ if (xcp_switch_supported)
+ device_remove_file(adev->dev,
+ &dev_attr_available_compute_partition);
+}
+
+static int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
+ if (r)
+ return r;
+ if (adev->gfx.enable_cleaner_shader)
+ r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
+
+ return r;
+}
+
+static void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
+{
+ device_remove_file(adev->dev, &dev_attr_enforce_isolation);
+ if (adev->gfx.enable_cleaner_shader)
+ device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
+}
+
+static int amdgpu_gfx_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (!amdgpu_gpu_recovery)
+ return r;
+
+ if (adev->gfx.num_gfx_rings) {
+ r = device_create_file(adev->dev, &dev_attr_gfx_reset_mask);
+ if (r)
+ return r;
+ }
+
+ if (adev->gfx.num_compute_rings) {
+ r = device_create_file(adev->dev, &dev_attr_compute_reset_mask);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+static void amdgpu_gfx_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_gpu_recovery)
+ return;
+
+ if (adev->gfx.num_gfx_rings)
+ device_remove_file(adev->dev, &dev_attr_gfx_reset_mask);
+
+ if (adev->gfx.num_compute_rings)
+ device_remove_file(adev->dev, &dev_attr_compute_reset_mask);
+}
+
+int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_gfx_sysfs_xcp_init(adev);
+ if (r) {
+ dev_err(adev->dev, "failed to create xcp sysfs files");
+ return r;
+ }
+
+ r = amdgpu_gfx_sysfs_isolation_shader_init(adev);
+ if (r)
+ dev_err(adev->dev, "failed to create isolation sysfs files");
+
+ r = amdgpu_gfx_sysfs_reset_mask_init(adev);
+ if (r)
+ dev_err(adev->dev, "failed to create reset mask sysfs files");
+
+ return r;
+}
+
+void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (adev->dev->kobj.sd) {
+ amdgpu_gfx_sysfs_xcp_fini(adev);
+ amdgpu_gfx_sysfs_isolation_shader_fini(adev);
+ amdgpu_gfx_sysfs_reset_mask_fini(adev);
+ }
+}
+
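+/*
+ * Allocate the buffer object that holds the cleaner shader binary (VRAM
+ * preferred, GTT as fallback); amdgpu_gfx_cleaner_shader_init() later copies
+ * the shader code into it.
+ */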
+int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size)
+{
+ if (!adev->gfx.enable_cleaner_shader)
+ return -EOPNOTSUPP;
+
+ return amdgpu_bo_create_kernel(adev, cleaner_shader_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.cleaner_shader_obj,
+ &adev->gfx.cleaner_shader_gpu_addr,
+ (void **)&adev->gfx.cleaner_shader_cpu_ptr);
+}
+
+void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev)
+{
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ amdgpu_bo_free_kernel(&adev->gfx.cleaner_shader_obj,
+ &adev->gfx.cleaner_shader_gpu_addr,
+ (void **)&adev->gfx.cleaner_shader_cpu_ptr);
+}
+
+void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size,
+ const void *cleaner_shader_ptr)
+{
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr)
+ memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr,
+ cleaner_shader_size);
+}
+
+/**
+ * amdgpu_gfx_kfd_sch_ctrl - Control the KFD scheduler from the KGD (Graphics Driver)
+ * @adev: amdgpu_device pointer
+ * @idx: Index of the scheduler to control
+ * @enable: Whether to enable or disable the KFD scheduler
+ *
+ * This function is used to control the KFD (Kernel Fusion Driver) scheduler
+ * from the KGD. It is part of the cleaner shader feature. This function plays
+ * a key role in enforcing process isolation on the GPU.
+ *
+ * The function uses a reference count mechanism (userq_sch_req_count) to keep
+ * track of the number of requests to enable the KFD scheduler. When a request
+ * to enable the KFD scheduler is made, the reference count is decremented.
+ * When the reference count reaches zero, a delayed work is scheduled to
+ * enforce isolation after the remaining time in the current slice
+ * (enforce_isolation_time).
+ *
+ * When a request to disable the KFD scheduler is made, the function first
+ * checks if the reference count is zero. If it is, it cancels the delayed work
+ * for enforcing isolation and checks if the KFD scheduler is active. If the
+ * KFD scheduler is active, it sends a request to stop the KFD scheduler and
+ * sets the KFD scheduler state to inactive. Then, it increments the reference
+ * count.
+ *
+ * The function is synchronized using the userq_sch_mutex to ensure that the
+ * KFD scheduler state and reference count are updated atomically.
+ *
+ * Note: If the reference count is already zero when a request to enable the
+ * KFD scheduler is made, it means there's an imbalance bug somewhere. The
+ * function triggers a warning in this case.
+ */
+static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
+ bool enable)
+{
+ mutex_lock(&adev->gfx.userq_sch_mutex);
+
+ if (enable) {
+ /* If the count is already 0, it means there's an imbalance bug somewhere.
+ * Note that the bug may be in a different caller than the one which triggers the
+ * WARN_ON_ONCE.
+ */
+ if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) {
+ dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
+ goto unlock;
+ }
+
+ adev->gfx.userq_sch_req_count[idx]--;
+
+ if (adev->gfx.userq_sch_req_count[idx] == 0 &&
+ adev->gfx.userq_sch_inactive[idx]) {
+ schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
+ msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
+ }
+ } else {
+ if (adev->gfx.userq_sch_req_count[idx] == 0) {
+ cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
+ if (!adev->gfx.userq_sch_inactive[idx]) {
+ amdgpu_userq_stop_sched_for_enforce_isolation(adev, idx);
+ if (adev->kfd.init_complete)
+ amdgpu_amdkfd_stop_sched(adev, idx);
+ adev->gfx.userq_sch_inactive[idx] = true;
+ }
+ }
+
+ adev->gfx.userq_sch_req_count[idx]++;
+ }
+
+unlock:
+ mutex_unlock(&adev->gfx.userq_sch_mutex);
+}
+
+/**
+ * amdgpu_gfx_enforce_isolation_handler - work handler for enforcing shader isolation
+ *
+ * @work: work_struct.
+ *
+ * This function is the work handler for enforcing shader isolation on AMD GPUs.
+ * It counts the fences still emitted on the GFX and compute rings that belong
+ * to the partition. If any fences are pending, it reschedules itself after a
+ * short (1 ms) delay so the kernel queues can drain. Once no fences remain, it
+ * signals the Kernel Fusion Driver (KFD) to resume its runqueue. The function
+ * is synchronized using the `enforce_isolation_mutex`.
+ */
+void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
+{
+ struct amdgpu_isolation_work *isolation_work =
+ container_of(work, struct amdgpu_isolation_work, work.work);
+ struct amdgpu_device *adev = isolation_work->adev;
+ u32 i, idx, fences = 0;
+
+ if (isolation_work->xcp_id == AMDGPU_XCP_NO_PARTITION)
+ idx = 0;
+ else
+ idx = isolation_work->xcp_id;
+
+ if (idx >= MAX_XCP)
+ return;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
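+	/* Only count fences from rings that belong to this partition */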
+ for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i) {
+ if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
+ }
+ for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i) {
+ if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
+ }
+ if (fences) {
+ /* we've already had our timeslice, so let's wrap this up */
+ schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
+ msecs_to_jiffies(1));
+ } else {
+ /* Tell KFD to resume the runqueue */
+ WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]);
+ WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]);
+
+ amdgpu_userq_start_sched_for_enforce_isolation(adev, idx);
+ if (adev->kfd.init_complete)
+ amdgpu_amdkfd_start_sched(adev, idx);
+ adev->gfx.userq_sch_inactive[idx] = false;
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+}
+
+/**
+ * amdgpu_gfx_enforce_isolation_wait_for_kfd - Manage KFD wait period for process isolation
+ * @adev: amdgpu_device pointer
+ * @idx: Index of the GPU partition
+ *
+ * When kernel submissions come in, the jobs are given a time slice and once
+ * that time slice is up, if there are KFD user queues active, kernel
+ * submissions are blocked until KFD has had its time slice. Once the KFD time
+ * slice is up, KFD user queues are preempted and kernel submissions are
+ * unblocked and allowed to run again.
+ */
+static void
+amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
+ u32 idx)
+{
+ unsigned long cjiffies;
+ bool wait = false;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
+ /* set the initial values if nothing is set */
+ if (!adev->gfx.enforce_isolation_jiffies[idx]) {
+ adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
+ adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
+ }
+ /* Make sure KFD gets a chance to run */
+ if (amdgpu_amdkfd_compute_active(adev, idx)) {
+ cjiffies = jiffies;
+ if (time_after(cjiffies, adev->gfx.enforce_isolation_jiffies[idx])) {
+ cjiffies -= adev->gfx.enforce_isolation_jiffies[idx];
+ if ((jiffies_to_msecs(cjiffies) >= GFX_SLICE_PERIOD_MS)) {
+ /* if our time is up, let KGD work drain before scheduling more */
+ wait = true;
+ /* reset the timer period */
+ adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
+ } else {
+ /* set the timer period to what's left in our time slice */
+ adev->gfx.enforce_isolation_time[idx] =
+ GFX_SLICE_PERIOD_MS - jiffies_to_msecs(cjiffies);
+ }
+ } else {
+ /* if jiffies wrap around we will just wait a little longer */
+ adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
+ }
+ } else {
+ /* if there is no KFD work, then set the full slice period */
+ adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
+ adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
+ }
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (wait)
+ msleep(GFX_SLICE_PERIOD_MS);
+}
+
+/**
+ * amdgpu_gfx_enforce_isolation_ring_begin_use - Begin use of a ring with enforced isolation
+ * @ring: Pointer to the amdgpu_ring structure
+ *
+ * Ring begin_use helper implementation for gfx which serializes access to the
+ * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
+ * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
+ * each get a time slice when both are active.
+ */
+void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 idx;
+ bool sched_work = false;
+
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
+ idx = 0;
+ else
+ idx = ring->xcp_id;
+
+ if (idx >= MAX_XCP)
+ return;
+
+ /* Don't submit more work until KFD has had some time */
+ amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx);
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
+ if (adev->kfd.init_complete)
+ sched_work = true;
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+}
+
+/**
+ * amdgpu_gfx_enforce_isolation_ring_end_use - End use of a ring with enforced isolation
+ * @ring: Pointer to the amdgpu_ring structure
+ *
+ * Ring end_use helper implementation for gfx which serializes access to the
+ * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
+ * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
+ * each get a time slice when both are active.
+ */
+void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ u32 idx;
+ bool sched_work = false;
+
+ if (!adev->gfx.enable_cleaner_shader)
+ return;
+
+ if (ring->xcp_id == AMDGPU_XCP_NO_PARTITION)
+ idx = 0;
+ else
+ idx = ring->xcp_id;
+
+ if (idx >= MAX_XCP)
+ return;
+
+ mutex_lock(&adev->enforce_isolation_mutex);
+ if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
+ if (adev->kfd.init_complete)
+ sched_work = true;
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+
+ if (sched_work)
+ amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+}
+
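+/**
+ * amdgpu_gfx_profile_idle_work_handler - drop the power profile when idle
+ * @work: delayed work item embedded in struct amdgpu_gfx
+ *
+ * Runs GFX_PROFILE_IDLE_TIMEOUT after the last submission. If no fences are
+ * pending and no submissions are in flight, the fullscreen 3D or compute
+ * power profile is switched off; otherwise the work is rescheduled.
+ */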
+void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, gfx.idle_work.work);
+ enum PP_SMC_POWER_PROFILE profile;
+ u32 i, fences = 0;
+ int r;
+
+ if (adev->gfx.num_gfx_rings)
+ profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ else
+ profile = PP_SMC_POWER_PROFILE_COMPUTE;
+
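+	/* Only drop the profile when no fences are pending and nothing is submitting */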
+ for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
+ for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i)
+ fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
+ if (!fences && !atomic_read(&adev->gfx.total_submission_cnt)) {
+ mutex_lock(&adev->gfx.workload_profile_mutex);
+ if (adev->gfx.workload_profile_active) {
+ r = amdgpu_dpm_switch_power_profile(adev, profile, false);
+ if (r)
+ dev_warn(adev->dev, "(%d) failed to disable %s power profile mode\n", r,
+ profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
+ "fullscreen 3D" : "compute");
+ adev->gfx.workload_profile_active = false;
+ }
+ mutex_unlock(&adev->gfx.workload_profile_mutex);
+ } else {
+ schedule_delayed_work(&adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
+ }
+}
+
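+/**
+ * amdgpu_gfx_profile_ring_begin_use - select a power profile for a submission
+ * @ring: ring about to be used
+ *
+ * Cancels any pending idle work and, if not already active, enables the
+ * fullscreen 3D (gfx) or compute power profile for the upcoming submission.
+ */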
+void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ enum PP_SMC_POWER_PROFILE profile;
+ int r;
+
+ if (amdgpu_dpm_is_overdrive_enabled(adev))
+ return;
+
+ if (adev->gfx.num_gfx_rings)
+ profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ else
+ profile = PP_SMC_POWER_PROFILE_COMPUTE;
+
+ atomic_inc(&adev->gfx.total_submission_cnt);
+
+ cancel_delayed_work_sync(&adev->gfx.idle_work);
+
+	/* We can safely return early here because we've cancelled the
+	 * delayed work, so there is no one else to set it to false
+ * and we don't care if someone else sets it to true.
+ */
+ if (adev->gfx.workload_profile_active)
+ return;
+
+ mutex_lock(&adev->gfx.workload_profile_mutex);
+ if (!adev->gfx.workload_profile_active) {
+ r = amdgpu_dpm_switch_power_profile(adev, profile, true);
+ if (r)
+			dev_warn(adev->dev, "(%d) failed to enable %s power profile mode\n", r,
+ profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ?
+ "fullscreen 3D" : "compute");
+ adev->gfx.workload_profile_active = true;
+ }
+ mutex_unlock(&adev->gfx.workload_profile_mutex);
+}
+
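+/**
+ * amdgpu_gfx_profile_ring_end_use - drop a submission reference
+ * @ring: ring that finished a submission
+ *
+ * Decrements the submission count and arms the idle work that disables the
+ * power profile once the GPU has been idle for GFX_PROFILE_IDLE_TIMEOUT.
+ */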
+void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (amdgpu_dpm_is_overdrive_enabled(adev))
+ return;
+
+ atomic_dec(&ring->adev->gfx.total_submission_cnt);
+
+ schedule_delayed_work(&ring->adev->gfx.idle_work, GFX_PROFILE_IDLE_TIMEOUT);
+}
+
+/**
+ * amdgpu_gfx_csb_preamble_start - Set CSB preamble start
+ *
+ * @buffer: Output buffer that receives the PACKET3 preamble start packets.
+ *
+ * Return:
+ * The next write index in @buffer (number of dwords written).
+ */
+u32 amdgpu_gfx_csb_preamble_start(u32 *buffer)
+{
+ u32 count = 0;
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
+ buffer[count++] = cpu_to_le32(0x80000000);
+ buffer[count++] = cpu_to_le32(0x80000000);
+
+ return count;
+}
+
+/**
+ * amdgpu_gfx_csb_data_parser - Parse clear-state (CS) data into CSB packets
+ *
+ * @adev: amdgpu_device pointer used to get the CS data and other gfx info.
+ * @buffer: Output buffer that receives the SET_CONTEXT_REG packets built
+ * from the clear-state data.
+ * @count: Index in @buffer at which to start writing.
+ *
+ * Return:
+ * The updated write index in @buffer.
+ */
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count)
+{
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+ u32 i;
+
+ for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
+ buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
+
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = cpu_to_le32(ext->extent[i]);
+ }
+ }
+ }
+
+ return count;
+}
+
+/**
+ * amdgpu_gfx_csb_preamble_end - Set CSB preamble end
+ *
+ * @buffer: Output buffer that receives the PACKET3 preamble end packets.
+ * @count: Index in @buffer at which to write the preamble end.
+ */
+void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count)
+{
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+
+ buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
+ buffer[count++] = cpu_to_le32(0);
+}
+
+/*
+ * debugfs interface to enable/disable gfx job submission to specific gfx rings.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_gfx_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
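+	/* At least one valid gfx ring bit must be set in the requested mask */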
+ mask = (1ULL << adev->gfx.num_gfx_rings) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+ ring = &adev->gfx.gfx_ring[i];
+		if (val & (1ULL << i))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+ /* publish sched.ready flag update effective immediately across smp */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_gfx_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
+ ring = &adev->gfx.gfx_ring[i];
+ if (ring->sched.ready)
+ mask |= 1ULL << i;
+ }
+
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gfx_sched_mask_fops,
+ amdgpu_debugfs_gfx_sched_mask_get,
+ amdgpu_debugfs_gfx_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+	if (adev->gfx.num_gfx_rings <= 1)
+ return;
+ sprintf(name, "amdgpu_gfx_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_gfx_sched_mask_fops);
+#endif
+}
+
+/*
+ * debugfs interface to enable/disable compute job submission to specific compute rings.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_compute_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
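+	/* At least one valid compute ring bit must be set in the requested mask */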
+ mask = (1ULL << adev->gfx.num_compute_rings) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+ ring = &adev->gfx.compute_ring[i];
+		if (val & (1ULL << i))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+
+ /* publish sched.ready flag update effective immediately across smp */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_compute_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
+ ring = &adev->gfx.compute_ring[i];
+ if (ring->sched.ready)
+ mask |= 1ULL << i;
+ }
+
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_compute_sched_mask_fops,
+ amdgpu_debugfs_compute_sched_mask_get,
+ amdgpu_debugfs_compute_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+	if (adev->gfx.num_compute_rings <= 1)
+ return;
+ sprintf(name, "amdgpu_compute_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_compute_sched_mask_fops);
+#endif
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index e02044086445..fb5f7a0ee029 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -24,10 +24,647 @@
#ifndef __AMDGPU_GFX_H__
#define __AMDGPU_GFX_H__
-int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
-void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
+/*
+ * GFX stuff
+ */
+#include "clearstate_defs.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_rlc.h"
+#include "amdgpu_imu.h"
+#include "soc15.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_ring_mux.h"
+#include "amdgpu_xcp.h"
+
+/* GFX current status */
+#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
+#define AMDGPU_GFX_SAFE_MODE 0x00000001L
+#define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L
+#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
+#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
+
+#define AMDGPU_MAX_GC_INSTANCES 8
+#define AMDGPU_MAX_QUEUES 128
+
+#define AMDGPU_MAX_GFX_QUEUES AMDGPU_MAX_QUEUES
+#define AMDGPU_MAX_COMPUTE_QUEUES AMDGPU_MAX_QUEUES
+
+enum amdgpu_gfx_pipe_priority {
+ AMDGPU_GFX_PIPE_PRIO_NORMAL = AMDGPU_RING_PRIO_1,
+ AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2
+};
+
+#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0
+#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15
+
+/* 1 second timeout */
+#define GFX_PROFILE_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+enum amdgpu_gfx_partition {
+ AMDGPU_SPX_PARTITION_MODE = 0,
+ AMDGPU_DPX_PARTITION_MODE = 1,
+ AMDGPU_TPX_PARTITION_MODE = 2,
+ AMDGPU_QPX_PARTITION_MODE = 3,
+ AMDGPU_CPX_PARTITION_MODE = 4,
+ AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE = -1,
+ /* Automatically choose the right mode */
+ AMDGPU_AUTO_COMPUTE_PARTITION_MODE = -2,
+};
+
+#define NUM_XCC(x) hweight16(x)
+
+enum amdgpu_gfx_ras_mem_id_type {
+ AMDGPU_GFX_CP_MEM = 0,
+ AMDGPU_GFX_GCEA_MEM,
+ AMDGPU_GFX_GC_CANE_MEM,
+ AMDGPU_GFX_GCUTCL2_MEM,
+ AMDGPU_GFX_GDS_MEM,
+ AMDGPU_GFX_LDS_MEM,
+ AMDGPU_GFX_RLC_MEM,
+ AMDGPU_GFX_SP_MEM,
+ AMDGPU_GFX_SPI_MEM,
+ AMDGPU_GFX_SQC_MEM,
+ AMDGPU_GFX_SQ_MEM,
+ AMDGPU_GFX_TA_MEM,
+ AMDGPU_GFX_TCC_MEM,
+ AMDGPU_GFX_TCA_MEM,
+ AMDGPU_GFX_TCI_MEM,
+ AMDGPU_GFX_TCP_MEM,
+ AMDGPU_GFX_TD_MEM,
+ AMDGPU_GFX_TCX_MEM,
+ AMDGPU_GFX_ATC_L2_MEM,
+ AMDGPU_GFX_UTCL2_MEM,
+ AMDGPU_GFX_VML2_MEM,
+ AMDGPU_GFX_VML2_WALKER_MEM,
+ AMDGPU_GFX_MEM_TYPE_NUM
+};
+
+struct amdgpu_mec {
+ struct amdgpu_bo *hpd_eop_obj;
+ u64 hpd_eop_gpu_addr;
+ struct amdgpu_bo *mec_fw_obj;
+ u64 mec_fw_gpu_addr;
+ struct amdgpu_bo *mec_fw_data_obj;
+ u64 mec_fw_data_gpu_addr;
+
+ u32 num_mec;
+ u32 num_pipe_per_mec;
+ u32 num_queue_per_pipe;
+ void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES];
+};
+
+struct amdgpu_mec_bitmap {
+ /* These are the resources for which amdgpu takes ownership */
+ DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+};
+
+enum amdgpu_unmap_queues_action {
+ PREEMPT_QUEUES = 0,
+ RESET_QUEUES,
+ DISABLE_PROCESS_QUEUES,
+ PREEMPT_QUEUES_NO_UNMAP,
+};
+
+struct kiq_pm4_funcs {
+ /* Support ASIC-specific kiq pm4 packets*/
+ void (*kiq_set_resources)(struct amdgpu_ring *kiq_ring,
+ uint64_t queue_mask);
+ void (*kiq_map_queues)(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring);
+ void (*kiq_unmap_queues)(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring,
+ enum amdgpu_unmap_queues_action action,
+ u64 gpu_addr, u64 seq);
+ void (*kiq_query_status)(struct amdgpu_ring *kiq_ring,
+ struct amdgpu_ring *ring,
+ u64 addr,
+ u64 seq);
+ void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring,
+ uint16_t pasid, uint32_t flush_type,
+ bool all_hub);
+ void (*kiq_reset_hw_queue)(struct amdgpu_ring *kiq_ring,
+ uint32_t queue_type, uint32_t me_id,
+ uint32_t pipe_id, uint32_t queue_id,
+ uint32_t xcc_id, uint32_t vmid);
+ /* Packet sizes */
+ int set_resources_size;
+ int map_queues_size;
+ int unmap_queues_size;
+ int query_status_size;
+ int invalidate_tlbs_size;
+};
+
+struct amdgpu_kiq {
+ u64 eop_gpu_addr;
+ struct amdgpu_bo *eop_obj;
+ spinlock_t ring_lock;
+ struct amdgpu_ring ring;
+ struct amdgpu_irq_src irq;
+ const struct kiq_pm4_funcs *pmf;
+ void *mqd_backup;
+};
+
+/*
+ * GFX configurations
+ */
+#define AMDGPU_GFX_MAX_SE 4
+#define AMDGPU_GFX_MAX_SH_PER_SE 2
+
+/**
+ * amdgpu_rb_config - Configure a single Render Backend (RB)
+ *
+ * Bad RBs are fused off. The driver reads a harvest register to determine
+ * which RBs are fused off so it can configure the hardware state such that
+ * nothing is sent to them. There are also user harvest registers that the
+ * driver can program to disable additional RBs, e.g. for testing purposes.
+ */
+struct amdgpu_rb_config {
+ /**
+ * @rb_backend_disable:
+ *
+ * The value captured from register RB_BACKEND_DISABLE indicates if the
+ * RB backend is disabled or not.
+ */
+ uint32_t rb_backend_disable;
+
+ /**
+ * @user_rb_backend_disable:
+ *
+ * The value captured from register USER_RB_BACKEND_DISABLE indicates
+ * if the User RB backend is disabled or not.
+ */
+ uint32_t user_rb_backend_disable;
+
+ /**
+ * @raster_config:
+ *
+	 * The raster configuration state is split across two registers;
+	 * this field holds the first one.
+ */
+ uint32_t raster_config;
+
+ /**
+ * @raster_config_1:
+ *
+	 * The raster configuration state is split across two registers;
+	 * this field holds the second one.
+ */
+ uint32_t raster_config_1;
+};
+
+struct gb_addr_config {
+ uint16_t pipe_interleave_size;
+ uint8_t num_pipes;
+ uint8_t max_compress_frags;
+ uint8_t num_banks;
+ uint8_t num_se;
+ uint8_t num_rb_per_se;
+ uint8_t num_pkrs;
+};
+
+struct amdgpu_gfx_config {
+ unsigned max_shader_engines;
+ unsigned max_tile_pipes;
+ unsigned max_cu_per_sh;
+ unsigned max_sh_per_se;
+ unsigned max_backends_per_se;
+ unsigned max_texture_channel_caches;
+ unsigned max_gprs;
+ unsigned max_gs_threads;
+ unsigned max_hw_contexts;
+ unsigned sc_prim_fifo_size_frontend;
+ unsigned sc_prim_fifo_size_backend;
+ unsigned sc_hiz_tile_fifo_size;
+ unsigned sc_earlyz_tile_fifo_size;
+
+ unsigned num_tile_pipes;
+ unsigned backend_enable_mask;
+ unsigned mem_max_burst_length_bytes;
+ unsigned mem_row_size_in_kb;
+ unsigned shader_engine_tile_size;
+ unsigned num_gpus;
+ unsigned multi_gpu_tile_size;
+ unsigned mc_arb_ramcfg;
+ unsigned num_banks;
+ unsigned num_ranks;
+ unsigned gb_addr_config;
+ unsigned num_rbs;
+ unsigned gs_vgt_table_depth;
+ unsigned gs_prim_buffer_depth;
+
+ uint32_t tile_mode_array[32];
+ uint32_t macrotile_mode_array[16];
+
+ struct gb_addr_config gb_addr_config_fields;
+
+ /**
+ * @rb_config:
+ *
+ * Matrix that keeps all the Render Backend (color and depth buffer
+ * handling) configuration on the 3D engine.
+ */
+ struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
+
+ /* gfx configure feature */
+ uint32_t double_offchip_lds_buf;
+ /* cached value of DB_DEBUG2 */
+ uint32_t db_debug2;
+ /* gfx10 specific config */
+ uint32_t num_sc_per_sh;
+ uint32_t num_packer_per_sc;
+ uint32_t pa_sc_tile_steering_override;
+ /* Whether texture coordinate truncation is conformant. */
+ bool ta_cntl2_truncate_coord_mode;
+ uint64_t tcc_disabled_mask;
+ uint32_t gc_num_tcp_per_sa;
+ uint32_t gc_num_sdp_interface;
+ uint32_t gc_num_tcps;
+ uint32_t gc_num_tcp_per_wpg;
+ uint32_t gc_tcp_l1_size;
+ uint32_t gc_num_sqc_per_wgp;
+ uint32_t gc_l1_instruction_cache_size_per_sqc;
+ uint32_t gc_l1_data_cache_size_per_sqc;
+ uint32_t gc_gl1c_per_sa;
+ uint32_t gc_gl1c_size_per_instance;
+ uint32_t gc_gl2c_per_gpu;
+ uint32_t gc_tcp_size_per_cu;
+ uint32_t gc_num_cu_per_sqc;
+ uint32_t gc_tcc_size;
+ uint32_t gc_tcp_cache_line_size;
+ uint32_t gc_instruction_cache_size_per_sqc;
+ uint32_t gc_instruction_cache_line_size;
+ uint32_t gc_scalar_data_cache_size_per_sqc;
+ uint32_t gc_scalar_data_cache_line_size;
+ uint32_t gc_tcc_cache_line_size;
+};
+
+struct amdgpu_cu_info {
+ uint32_t simd_per_cu;
+ uint32_t max_waves_per_simd;
+ uint32_t wave_front_size;
+ uint32_t max_scratch_slots_per_cu;
+ uint32_t lds_size;
+
+ /* total active CU number */
+ uint32_t number;
+ uint32_t ao_cu_mask;
+ uint32_t ao_cu_bitmap[4][4];
+ uint32_t bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
+};
+
+struct amdgpu_gfx_ras {
+ struct amdgpu_ras_block_object ras_block;
+ void (*enable_watchdog_timer)(struct amdgpu_device *adev);
+ int (*rlc_gc_fed_irq)(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
+ int (*poison_consumption_handler)(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry);
+};
+
+struct amdgpu_gfx_shadow_info {
+ u32 shadow_size;
+ u32 shadow_alignment;
+ u32 csa_size;
+ u32 csa_alignment;
+};
+
+struct amdgpu_gfx_funcs {
+ /* get the gpu clock counter */
+ uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
+ void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num,
+ u32 sh_num, u32 instance, int xcc_id);
+ void (*read_wave_data)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
+ uint32_t wave, uint32_t *dst, int *no_fields);
+ void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
+ uint32_t wave, uint32_t thread, uint32_t start,
+ uint32_t size, uint32_t *dst);
+ void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd,
+ uint32_t wave, uint32_t start, uint32_t size,
+ uint32_t *dst);
+ void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe,
+ u32 queue, u32 vmid, u32 xcc_id);
+ void (*init_spm_golden)(struct amdgpu_device *adev);
+ void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
+ int (*get_gfx_shadow_info)(struct amdgpu_device *adev,
+ struct amdgpu_gfx_shadow_info *shadow_info,
+ bool skip_check);
+ enum amdgpu_gfx_partition
+ (*query_partition_mode)(struct amdgpu_device *adev);
+ int (*switch_partition_mode)(struct amdgpu_device *adev,
+ int num_xccs_per_xcp);
+ int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node);
+ int (*get_xccs_per_xcp)(struct amdgpu_device *adev);
+};
+
+struct sq_work {
+ struct work_struct work;
+ unsigned ih_data;
+};
+
+struct amdgpu_pfp {
+ struct amdgpu_bo *pfp_fw_obj;
+ uint64_t pfp_fw_gpu_addr;
+ uint32_t *pfp_fw_ptr;
+
+ struct amdgpu_bo *pfp_fw_data_obj;
+ uint64_t pfp_fw_data_gpu_addr;
+ uint32_t *pfp_fw_data_ptr;
+};
+
+struct amdgpu_ce {
+ struct amdgpu_bo *ce_fw_obj;
+ uint64_t ce_fw_gpu_addr;
+ uint32_t *ce_fw_ptr;
+};
+
+struct amdgpu_me {
+ struct amdgpu_bo *me_fw_obj;
+ uint64_t me_fw_gpu_addr;
+ uint32_t *me_fw_ptr;
+
+ struct amdgpu_bo *me_fw_data_obj;
+ uint64_t me_fw_data_gpu_addr;
+ uint32_t *me_fw_data_ptr;
+
+ uint32_t num_me;
+ uint32_t num_pipe_per_me;
+ uint32_t num_queue_per_pipe;
+ void *mqd_backup[AMDGPU_MAX_GFX_RINGS];
+
+ /* These are the resources for which amdgpu takes ownership */
+ DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
+};
+
+struct amdgpu_isolation_work {
+ struct amdgpu_device *adev;
+ u32 xcp_id;
+ struct delayed_work work;
+};
+
+struct amdgpu_gfx {
+ struct mutex gpu_clock_mutex;
+ struct amdgpu_gfx_config config;
+ struct amdgpu_rlc rlc;
+ struct amdgpu_pfp pfp;
+ struct amdgpu_ce ce;
+ struct amdgpu_me me;
+ struct amdgpu_mec mec;
+ struct amdgpu_mec_bitmap mec_bitmap[AMDGPU_MAX_GC_INSTANCES];
+ struct amdgpu_kiq kiq[AMDGPU_MAX_GC_INSTANCES];
+ struct amdgpu_imu imu;
+ bool rs64_enable; /* firmware format */
+ const struct firmware *me_fw; /* ME firmware */
+ uint32_t me_fw_version;
+ const struct firmware *pfp_fw; /* PFP firmware */
+ uint32_t pfp_fw_version;
+ const struct firmware *ce_fw; /* CE firmware */
+ uint32_t ce_fw_version;
+ const struct firmware *rlc_fw; /* RLC firmware */
+ uint32_t rlc_fw_version;
+ const struct firmware *mec_fw; /* MEC firmware */
+ uint32_t mec_fw_version;
+ const struct firmware *mec2_fw; /* MEC2 firmware */
+ uint32_t mec2_fw_version;
+ const struct firmware *imu_fw; /* IMU firmware */
+ uint32_t imu_fw_version;
+ uint32_t me_feature_version;
+ uint32_t ce_feature_version;
+ uint32_t pfp_feature_version;
+ uint32_t rlc_feature_version;
+ uint32_t rlc_srlc_fw_version;
+ uint32_t rlc_srlc_feature_version;
+ uint32_t rlc_srlg_fw_version;
+ uint32_t rlc_srlg_feature_version;
+ uint32_t rlc_srls_fw_version;
+ uint32_t rlc_srls_feature_version;
+ uint32_t rlcp_ucode_version;
+ uint32_t rlcp_ucode_feature_version;
+ uint32_t rlcv_ucode_version;
+ uint32_t rlcv_ucode_feature_version;
+ uint32_t mec_feature_version;
+ uint32_t mec2_feature_version;
+ bool mec_fw_write_wait;
+ bool me_fw_write_wait;
+ bool cp_fw_write_wait;
+ struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
+ unsigned num_gfx_rings;
+ struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES];
+ unsigned num_compute_rings;
+ struct amdgpu_irq_src eop_irq;
+ struct amdgpu_irq_src priv_reg_irq;
+ struct amdgpu_irq_src priv_inst_irq;
+ struct amdgpu_irq_src bad_op_irq;
+ struct amdgpu_irq_src cp_ecc_error_irq;
+ struct amdgpu_irq_src sq_irq;
+ struct amdgpu_irq_src rlc_gc_fed_irq;
+ struct sq_work sq_work;
+
+ /* gfx status */
+ uint32_t gfx_current_status;
+ /* ce ram size*/
+ unsigned ce_ram_size;
+ struct amdgpu_cu_info cu_info;
+ const struct amdgpu_gfx_funcs *funcs;
+
+ /* reset mask */
+ uint32_t grbm_soft_reset;
+ uint32_t srbm_soft_reset;
+ uint32_t gfx_supported_reset;
+ uint32_t compute_supported_reset;
+
+ /* gfx off */
+ bool gfx_off_state; /* true: enabled, false: disabled */
+ struct mutex gfx_off_mutex; /* mutex to change gfxoff state */
+ uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */
+ struct delayed_work gfx_off_delay_work; /* async work to set gfx block off */
+ uint32_t gfx_off_residency; /* last logged residency */
+	uint64_t gfx_off_entrycount; /* count of times GPU has entered the GFXOFF state */
+
+ /* pipe reservation */
+ struct mutex pipe_reserve_mutex;
+ DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+
+ /*ras */
+ struct ras_common_if *ras_if;
+ struct amdgpu_gfx_ras *ras;
+
+ bool is_poweron;
+
+ struct amdgpu_ring sw_gfx_ring[AMDGPU_MAX_SW_GFX_RINGS];
+ struct amdgpu_ring_mux muxer;
+
+ bool cp_gfx_shadow; /* for gfx11 */
+
+ uint16_t xcc_mask;
+ uint32_t num_xcc_per_xcp;
+ struct mutex partition_mutex;
+ bool mcbp; /* mid command buffer preemption */
+
+ /* IP reg dump */
+ uint32_t *ip_dump_core;
+ uint32_t *ip_dump_compute_queues;
+ uint32_t *ip_dump_gfx_queues;
+
+ struct mutex reset_sem_mutex;
+
+ /* cleaner shader */
+ struct amdgpu_bo *cleaner_shader_obj;
+ unsigned int cleaner_shader_size;
+ u64 cleaner_shader_gpu_addr;
+ void *cleaner_shader_cpu_ptr;
+ const void *cleaner_shader_ptr;
+ bool enable_cleaner_shader;
+ struct amdgpu_isolation_work enforce_isolation[MAX_XCP];
+ /* Mutex for synchronizing KFD scheduler operations */
+ struct mutex userq_sch_mutex;
+ u64 userq_sch_req_count[MAX_XCP];
+ bool userq_sch_inactive[MAX_XCP];
+ unsigned long enforce_isolation_jiffies[MAX_XCP];
+ unsigned long enforce_isolation_time[MAX_XCP];
+
+ atomic_t total_submission_cnt;
+ struct delayed_work idle_work;
+ bool workload_profile_active;
+ struct mutex workload_profile_mutex;
+
+ bool disable_kq;
+ bool disable_uq;
+};
+
+struct amdgpu_gfx_ras_reg_entry {
+ struct amdgpu_ras_err_status_reg_entry reg_entry;
+ enum amdgpu_gfx_ras_mem_id_type mem_id_type;
+ uint32_t se_num;
+};
+
+struct amdgpu_gfx_ras_mem_id_entry {
+ const struct amdgpu_ras_memory_id_entry *mem_id_ent;
+ uint32_t size;
+};
+
+#define AMDGPU_GFX_MEMID_ENT(x) {(x), ARRAY_SIZE(x)},
+
+#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
+#define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id)))
+#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id)))
+#define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev))
+#define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si), false))
+
+/**
+ * amdgpu_gfx_create_bitmask - create a bitmask
+ *
+ * @bit_width: length of the mask
+ *
+ * create a variable length bit mask.
+ * Returns the bitmask.
+ */
+static inline u32 amdgpu_gfx_create_bitmask(u32 bit_width)
+{
+ return (u32)((1ULL << bit_width) - 1);
+}
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
- unsigned max_sh);
+ unsigned max_sh);
+
+int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id);
+
+void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring);
+
+void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
+ unsigned hpd_size, int xcc_id);
+
+int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
+ unsigned mqd_size, int xcc_id);
+void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id);
+
+void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
+void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev);
+
+int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
+ int pipe, int queue);
+void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
+ int *mec, int *pipe, int *queue);
+bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int xcc_id,
+ int mec, int pipe, int queue);
+bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me,
+ int pipe, int queue);
+void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable);
+void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable);
+int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value);
+int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
+void amdgpu_gfx_ras_fini(struct amdgpu_device *adev);
+int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value);
+int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *residency);
+int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value);
+int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id);
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id);
+int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
+void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
+
+int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry);
+
+bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev);
+void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
+ void *ras_error_status,
+ void (*func)(struct amdgpu_device *adev, void *ras_error_status,
+ int xcc_id));
+int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size);
+void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev);
+void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev,
+ unsigned int cleaner_shader_size,
+ const void *cleaner_shader_ptr);
+void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work);
+void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring);
+
+void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work);
+void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring);
+u32 amdgpu_gfx_csb_preamble_start(u32 *buffer);
+u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count);
+void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count);
+
+void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);
+
+static inline const char *amdgpu_gfx_compute_mode_desc(int mode)
+{
+ switch (mode) {
+ case AMDGPU_SPX_PARTITION_MODE:
+ return "SPX";
+ case AMDGPU_DPX_PARTITION_MODE:
+ return "DPX";
+ case AMDGPU_TPX_PARTITION_MODE:
+ return "TPX";
+ case AMDGPU_QPX_PARTITION_MODE:
+ return "QPX";
+ case AMDGPU_CPX_PARTITION_MODE:
+ return "CPX";
+ default:
+ return "UNKNOWN";
+ }
+}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
new file mode 100644
index 000000000000..c7b44aeb671b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_GFXHUB_H__
+#define __AMDGPU_GFXHUB_H__
+
+struct amdgpu_gfxhub_funcs {
+ u64 (*get_fb_location)(struct amdgpu_device *adev);
+ u64 (*get_mc_fb_offset)(struct amdgpu_device *adev);
+ void (*setup_vm_pt_regs)(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
+ int (*gart_enable)(struct amdgpu_device *adev);
+
+ void (*gart_disable)(struct amdgpu_device *adev);
+ void (*set_fault_enable_default)(struct amdgpu_device *adev, bool value);
+ void (*init)(struct amdgpu_device *adev);
+ int (*get_xgmi_info)(struct amdgpu_device *adev);
+ void (*utcl2_harvest)(struct amdgpu_device *adev);
+ void (*mode2_save_regs)(struct amdgpu_device *adev);
+ void (*mode2_restore_regs)(struct amdgpu_device *adev);
+ void (*halt)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_gfxhub {
+ const struct amdgpu_gfxhub_funcs *funcs;
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
new file mode 100644
index 000000000000..9dcf51991b5b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -0,0 +1,1679 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+#ifdef CONFIG_X86
+#include <asm/hypervisor.h>
+#endif
+
+#include "amdgpu.h"
+#include "amdgpu_gmc.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_xgmi.h"
+
+#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_tt.h>
+
+static const u64 four_gb = 0x100000000ULL;
+
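+/**
+ * amdgpu_gmc_is_pdb0_enabled - check whether the PDB0 (vmid0) page table is used
+ * @adev: amdgpu_device pointer
+ *
+ * PDB0 is used when the GPU is connected to the CPU over XGMI or when
+ * XGMI-aware migration is enabled under virtualization.
+ */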
+bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev)
+{
+ return adev->gmc.xgmi.connected_to_cpu || amdgpu_virt_xgmi_migrate_enabled(adev);
+}
+
+/**
+ * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate video memory for pdb0 and map it for CPU access
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
+{
+ int r;
+ struct amdgpu_bo_param bp;
+ u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
+ uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
+ uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) - 1) >> pde0_page_shift;
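+	/* Each PDE0 entry covers 2^pde0_page_shift bytes of VRAM; one extra
+	 * entry is reserved so the last PDE can point at the GART page table.
+	 */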
+
+ memset(&bp, 0, sizeof(bp));
+ bp.size = PAGE_ALIGN((npdes + 1) * 8);
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
+ if (unlikely(r != 0))
+ goto bo_reserve_failure;
+
+ r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto bo_pin_failure;
+ r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
+ if (r)
+ goto bo_kmap_failure;
+
+ amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
+ return 0;
+
+bo_kmap_failure:
+ amdgpu_bo_unpin(adev->gmc.pdb0_bo);
+bo_pin_failure:
+ amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
+bo_reserve_failure:
+ amdgpu_bo_unref(&adev->gmc.pdb0_bo);
+ return r;
+}
+
+/**
+ * amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
+ *
+ * @bo: the BO to get the PDE for
+ * @level: the level in the PD hierarchy
+ * @addr: resulting addr
+ * @flags: resulting flags
+ *
+ * Get the address and flags to be used for a PDE (Page Directory Entry).
+ */
+void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
+ uint64_t *addr, uint64_t *flags)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ switch (bo->tbo.resource->mem_type) {
+ case TTM_PL_TT:
+ *addr = bo->tbo.ttm->dma_address[0];
+ break;
+ case TTM_PL_VRAM:
+ *addr = amdgpu_bo_gpu_offset(bo);
+ break;
+ default:
+ *addr = 0;
+ break;
+ }
+ *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);
+ amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
+}
+
+/*
+ * amdgpu_gmc_pd_addr - return the address of the root directory
+ */
+uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ uint64_t pd_addr;
+
+ /* TODO: move that into ASIC specific code */
+ if (adev->asic_type >= CHIP_VEGA10) {
+ uint64_t flags = AMDGPU_PTE_VALID;
+
+ amdgpu_gmc_get_pde_for_bo(bo, -1, &pd_addr, &flags);
+ pd_addr |= flags;
+ } else {
+ pd_addr = amdgpu_bo_gpu_offset(bo);
+ }
+ return pd_addr;
+}
+
+/**
+ * amdgpu_gmc_set_pte_pde - update the page tables using CPU
+ *
+ * @adev: amdgpu_device pointer
+ * @cpu_pt_addr: cpu address of the page table
+ * @gpu_page_idx: entry in the page table to update
+ * @addr: dst addr to write into pte/pde
+ * @flags: access flags
+ *
+ * Update the page tables using CPU.
+ */
+int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+ uint32_t gpu_page_idx, uint64_t addr,
+ uint64_t flags)
+{
+ void __iomem *ptr = (void *)cpu_pt_addr;
+ uint64_t value;
+
+ /*
+ * The following is for PTE only. GART does not have PDEs.
+ */
+ value = addr & 0x0000FFFFFFFFF000ULL;
+ value |= flags;
+ writeq(value, ptr + (gpu_page_idx * 8));
+
+ return 0;
+}
+
+/**
+ * amdgpu_gmc_agp_addr - return the address in the AGP address space
+ *
+ * @bo: TTM BO which needs the address, must be in GTT domain
+ *
+ * Tries to figure out how to access the BO through the AGP aperture. Returns
+ * AMDGPU_BO_INVALID_OFFSET if that is not possible.
+ */
+uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+
+ if (!bo->ttm)
+ return AMDGPU_BO_INVALID_OFFSET;
+
+ if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
+ return AMDGPU_BO_INVALID_OFFSET;
+
+ if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
+ return AMDGPU_BO_INVALID_OFFSET;
+
+ return adev->gmc.agp_start + bo->ttm->dma_address[0];
+}
+
+/**
+ * amdgpu_gmc_vram_location - try to find VRAM location
+ *
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ * @base: base address at which to put VRAM
+ *
+ * Function will try to place VRAM at base address provided
+ * as parameter.
+ */
+void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
+ u64 base)
+{
+ uint64_t vis_limit = (uint64_t)amdgpu_vis_vram_limit << 20;
+ uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
+
+ mc->vram_start = base;
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ if (limit < mc->real_vram_size)
+ mc->real_vram_size = limit;
+
+ if (vis_limit && vis_limit < mc->visible_vram_size)
+ mc->visible_vram_size = vis_limit;
+
+ if (mc->real_vram_size < mc->visible_vram_size)
+ mc->visible_vram_size = mc->real_vram_size;
+
+ if (mc->xgmi.num_physical_nodes == 0) {
+ mc->fb_start = mc->vram_start;
+ mc->fb_end = mc->vram_end;
+ }
+ dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+}
+
+/**
+ * amdgpu_gmc_sysvm_location - place vram and gart in sysvm aperture
+ *
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * This function is only used when GART is used for FB translation. In that
+ * case we use the sysvm aperture (vmid0 page tables) for both vram and gart
+ * (aka system memory) access.
+ *
+ * GPUVM (and our organization of the vmid0 page tables) requires the sysvm
+ * aperture to be placed at an address aligned to 8 times the native page
+ * size. For example, if vm_context0_cntl.page_table_block_size is 12, the
+ * native page size is 8G (2M * 2^12), so sysvm should start at a 64G-aligned
+ * address. For simplicity, we just put sysvm at address 0, so vram starts at
+ * address 0 and gart comes right after vram.
+ */
+void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+{
+ u64 hive_vram_start = 0;
+ u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;
+ mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
+ mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
+	/* node_segment_size may not be 4GB aligned on SRIOV, so align up. */
+ mc->gart_start = ALIGN(hive_vram_end + 1, four_gb);
+ mc->gart_end = mc->gart_start + mc->gart_size - 1;
+ if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
+ /* set mc->vram_start to 0 to switch the returned GPU address of
+ * amdgpu_bo_create_reserved() from FB aperture to GART aperture.
+ */
+ mc->vram_start = 0;
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ mc->visible_vram_size = min(mc->visible_vram_size, mc->real_vram_size);
+ } else {
+ mc->fb_start = hive_vram_start;
+ mc->fb_end = hive_vram_end;
+ }
+ dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+ dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
+ mc->gart_size >> 20, mc->gart_start, mc->gart_end);
+}
+
+/**
+ * amdgpu_gmc_gart_location - try to find GART location
+ *
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ * @gart_placement: GART placement policy with respect to VRAM
+ *
+ * Function will try to place GART before or after VRAM.
+ * If the GART size is bigger than the space left, the GART size is adjusted,
+ * so this function never fails.
+ */
+void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
+ enum amdgpu_gart_placement gart_placement)
+{
+ u64 size_af, size_bf;
+	/* To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START */
+ u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
+
+ /* VCE doesn't like it when BOs cross a 4GB segment, so align
+ * the GART base on a 4GB boundary as well.
+ */
+ size_bf = mc->fb_start;
+ size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);
+
+ if (mc->gart_size > max(size_bf, size_af)) {
+ dev_warn(adev->dev, "limiting GART\n");
+ mc->gart_size = max(size_bf, size_af);
+ }
+
+ switch (gart_placement) {
+ case AMDGPU_GART_PLACEMENT_HIGH:
+ mc->gart_start = max_mc_address - mc->gart_size + 1;
+ break;
+ case AMDGPU_GART_PLACEMENT_LOW:
+ mc->gart_start = 0;
+ break;
+ case AMDGPU_GART_PLACEMENT_BEST_FIT:
+ default:
+ if ((size_bf >= mc->gart_size && size_bf < size_af) ||
+ (size_af < mc->gart_size))
+ mc->gart_start = 0;
+ else
+ mc->gart_start = max_mc_address - mc->gart_size + 1;
+ break;
+ }
+
+ mc->gart_start &= ~(four_gb - 1);
+ mc->gart_end = mc->gart_start + mc->gart_size - 1;
+ dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
+ mc->gart_size >> 20, mc->gart_start, mc->gart_end);
+}
+
+/**
+ * amdgpu_gmc_agp_location - try to find AGP location
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * Function will try to find a place for the AGP BAR in the MC address
+ * space.
+ *
+ * AGP BAR will be assigned the largest available hole in the address space.
+ * Should be called after VRAM and GART locations are setup.
+ */
+void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+{
+ const uint64_t sixteen_gb = 1ULL << 34;
+ const uint64_t sixteen_gb_mask = ~(sixteen_gb - 1);
+ u64 size_af, size_bf;
+
+ if (mc->fb_start > mc->gart_start) {
+ size_bf = (mc->fb_start & sixteen_gb_mask) -
+ ALIGN(mc->gart_end + 1, sixteen_gb);
+ size_af = mc->mc_mask + 1 - ALIGN(mc->fb_end + 1, sixteen_gb);
+ } else {
+ size_bf = mc->fb_start & sixteen_gb_mask;
+ size_af = (mc->gart_start & sixteen_gb_mask) -
+ ALIGN(mc->fb_end + 1, sixteen_gb);
+ }
+
+ if (size_bf > size_af) {
+ mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
+ mc->agp_size = size_bf;
+ } else {
+ mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
+ mc->agp_size = size_af;
+ }
+
+ mc->agp_end = mc->agp_start + mc->agp_size - 1;
+ dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
+ mc->agp_size >> 20, mc->agp_start, mc->agp_end);
+}
+
+/**
+ * amdgpu_gmc_set_agp_default - Set the default AGP aperture value.
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * To disable the AGP aperture, you need to set the start to a larger
+ * value than the end. This function sets the default value which
+ * can then be overridden using amdgpu_gmc_agp_location() if you want
+ * to enable the AGP aperture on a specific chip.
+ *
+ */
+void amdgpu_gmc_set_agp_default(struct amdgpu_device *adev,
+ struct amdgpu_gmc *mc)
+{
+ mc->agp_start = 0xffffffffffff;
+ mc->agp_end = 0;
+ mc->agp_size = 0;
+}
+
+/**
+ * amdgpu_gmc_fault_key - get hash key from vm fault address and pasid
+ *
+ * @addr: 48 bit physical address, page aligned (36 significant bits)
+ * @pasid: 16 bit process address space identifier
+ */
+static inline uint64_t amdgpu_gmc_fault_key(uint64_t addr, uint16_t pasid)
+{
+ return addr << 4 | pasid;
+}
+
+/**
+ * amdgpu_gmc_filter_faults - filter VM faults
+ *
+ * @adev: amdgpu device structure
+ * @ih: interrupt ring that the fault received from
+ * @addr: address of the VM fault
+ * @pasid: PASID of the process causing the fault
+ * @timestamp: timestamp of the fault
+ *
+ * Returns:
+ * True if the fault was filtered and should not be processed further.
+ * False if the fault is a new one and needs to be handled.
+ */
+bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih, uint64_t addr,
+ uint16_t pasid, uint64_t timestamp)
+{
+ struct amdgpu_gmc *gmc = &adev->gmc;
+ uint64_t stamp, key = amdgpu_gmc_fault_key(addr, pasid);
+ struct amdgpu_gmc_fault *fault;
+ uint32_t hash;
+
+ /* Stale retry fault if timestamp goes backward */
+ if (amdgpu_ih_ts_after(timestamp, ih->processed_timestamp))
+ return true;
+
+ /* If we don't have space left in the ring buffer return immediately */
+ stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
+ AMDGPU_GMC_FAULT_TIMEOUT;
+ if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
+ return true;
+
+ /* Try to find the fault in the hash */
+ hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
+ fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
+ while (fault->timestamp >= stamp) {
+ uint64_t tmp;
+
+ if (atomic64_read(&fault->key) == key) {
+ /*
+ * if we get a fault which is already present in
+ * the fault_ring and the timestamp of
+ * the fault is after the expired timestamp,
+ * then this is a new fault that needs to be added
+ * into the fault ring.
+ */
+ if (fault->timestamp_expiry != 0 &&
+ amdgpu_ih_ts_after(fault->timestamp_expiry,
+ timestamp))
+ break;
+ else
+ return true;
+ }
+
+ tmp = fault->timestamp;
+ fault = &gmc->fault_ring[fault->next];
+
+ /* Check if the entry was reused */
+ if (fault->timestamp >= tmp)
+ break;
+ }
+
+ /* Add the fault to the ring */
+ fault = &gmc->fault_ring[gmc->last_fault];
+ atomic64_set(&fault->key, key);
+ fault->timestamp = timestamp;
+
+ /* And update the hash */
+ fault->next = gmc->fault_hash[hash].idx;
+ gmc->fault_hash[hash].idx = gmc->last_fault++;
+ return false;
+}
+
+/**
+ * amdgpu_gmc_filter_faults_remove - remove address from VM faults filter
+ *
+ * @adev: amdgpu device structure
+ * @addr: address of the VM fault
+ * @pasid: PASID of the process causing the fault
+ *
+ * Remove the address from fault filter, then future vm fault on this address
+ * will pass to retry fault handler to recover.
+ */
+void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
+ uint16_t pasid)
+{
+ struct amdgpu_gmc *gmc = &adev->gmc;
+ uint64_t key = amdgpu_gmc_fault_key(addr, pasid);
+ struct amdgpu_ih_ring *ih;
+ struct amdgpu_gmc_fault *fault;
+ uint32_t last_wptr;
+ uint64_t last_ts;
+ uint32_t hash;
+ uint64_t tmp;
+
+ if (adev->irq.retry_cam_enabled)
+ return;
+
+ ih = &adev->irq.ih1;
+ /* Get the WPTR of the last entry in IH ring */
+ last_wptr = amdgpu_ih_get_wptr(adev, ih);
+ /* Order wptr with ring data. */
+ rmb();
+	/* Get the timestamp of the last entry in IH ring */
+ last_ts = amdgpu_ih_decode_iv_ts(adev, ih, last_wptr, -1);
+
+ hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
+ fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
+ do {
+ if (atomic64_read(&fault->key) == key) {
+ /*
+ * Update the timestamp when this fault
+ * expired.
+ */
+ fault->timestamp_expiry = last_ts;
+ break;
+ }
+
+ tmp = fault->timestamp;
+ fault = &gmc->fault_ring[fault->next];
+ } while (fault->timestamp < tmp);
+}
+
+int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ /* umc ras block */
+ r = amdgpu_umc_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* mmhub ras block */
+ r = amdgpu_mmhub_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* hdp ras block */
+ r = amdgpu_hdp_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* mca.x ras block */
+ r = amdgpu_mca_mp0_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_mca_mp1_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_mca_mpio_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ /* xgmi ras block */
+ r = amdgpu_xgmi_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+void amdgpu_gmc_ras_fini(struct amdgpu_device *adev)
+{
+
+}
+
+ /*
+ * The latest engine allocation on gfx9/10 is:
+ * Engine 2, 3: firmware
+ * Engine 0, 1, 4~16: amdgpu ring,
+ * subject to change when ring number changes
+ * Engine 17: Gart flushes
+ */
+#define AMDGPU_VMHUB_INV_ENG_BITMAP 0x1FFF3
+
+int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = {0};
+ unsigned i;
+ unsigned vmhub, inv_eng;
+ struct amdgpu_ring *shared_ring;
+
+ /* init the vm inv eng for all vmhubs */
+ for_each_set_bit(i, adev->vmhubs_mask, AMDGPU_MAX_VMHUBS) {
+ vm_inv_engs[i] = AMDGPU_VMHUB_INV_ENG_BITMAP;
+ /* reserve engine 5 for firmware */
+ if (adev->enable_mes)
+ vm_inv_engs[i] &= ~(1 << 5);
+ /* reserve mmhub engine 3 for firmware */
+ if (adev->enable_umsch_mm)
+ vm_inv_engs[i] &= ~(1 << 3);
+ }
+
+ for (i = 0; i < adev->num_rings; ++i) {
+ ring = adev->rings[i];
+ vmhub = ring->vm_hub;
+
+ if (ring == &adev->mes.ring[0] ||
+ ring == &adev->mes.ring[1] ||
+ ring == &adev->umsch_mm.ring ||
+ ring == &adev->cper.ring_buf)
+ continue;
+
+ /* Skip if the ring is a shared ring */
+ if (amdgpu_sdma_is_shared_inv_eng(adev, ring))
+ continue;
+
+ inv_eng = ffs(vm_inv_engs[vmhub]);
+ if (!inv_eng) {
+ dev_err(adev->dev, "no VM inv eng for ring %s\n",
+ ring->name);
+ return -EINVAL;
+ }
+
+ ring->vm_inv_eng = inv_eng - 1;
+ vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
+
+ dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
+ ring->name, ring->vm_inv_eng, ring->vm_hub);
+ /* SDMA has a special packet which allows it to use the same
+ * invalidation engine for all the rings in one instance.
+ * Therefore, we do not allocate a separate VM invalidation engine
+ * for SDMA page rings. Instead, they share the VM invalidation
+ * engine with the SDMA gfx ring. This change ensures efficient
+ * resource management and avoids the issue of insufficient VM
+ * invalidation engines.
+ */
+ shared_ring = amdgpu_sdma_get_shared_ring(adev, ring);
+ if (shared_ring) {
+ shared_ring->vm_inv_eng = ring->vm_inv_eng;
+ dev_info(adev->dev, "ring %s shares VM invalidation engine %u with ring %s on hub %u\n",
+ ring->name, ring->vm_inv_eng, shared_ring->name, ring->vm_hub);
+ continue;
+ }
+ }
+
+ return 0;
+}
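
The allocator above is a first-fit scan over a per-hub bitmap: ffs() returns the lowest set bit, that engine is handed to the ring and its bit is cleared. A compact standalone sketch of the same pattern, with a made-up ring count:

/* First-fit allocation from an engine bitmap via ffs(); values are made up. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int free_engs = 0x1FFF3;	/* engines 0, 1, 4..16 free */
	int ring;

	for (ring = 0; ring < 5; ring++) {
		int eng = ffs((int)free_engs);

		if (!eng) {
			fprintf(stderr, "no VM inv eng left for ring %d\n", ring);
			return 1;
		}
		free_engs &= ~(1u << (eng - 1));
		printf("ring %d -> VM inv eng %d\n", ring, eng - 1);
	}
	return 0;
}
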
+
+void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+ uint32_t vmhub, uint32_t flush_type)
+{
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
+ struct dma_fence *fence;
+ struct amdgpu_job *job;
+ int r;
+
+ if (!hub->sdma_invalidation_workaround || vmid ||
+ !adev->mman.buffer_funcs_enabled || !adev->ib_pool_ready ||
+ !ring->sched.ready) {
+ /*
+ * A GPU reset should flush all TLBs anyway, so no need to do
+ * this while one is ongoing.
+ */
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return;
+
+ if (adev->gmc.flush_tlb_needs_extra_type_2)
+ adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid,
+ vmhub, 2);
+
+ if (adev->gmc.flush_tlb_needs_extra_type_0 && flush_type == 2)
+ adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid,
+ vmhub, 0);
+
+ adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, vmhub,
+ flush_type);
+ up_read(&adev->reset_domain->sem);
+ return;
+ }
+
+ /* The SDMA on Navi 1x has a bug which can theoretically result in memory
+ * corruption if an invalidation happens at the same time as a VA
+ * translation. Avoid this by doing the invalidation from the SDMA
+ * itself at least for GART.
+ */
+ mutex_lock(&adev->mman.gtt_window_lock);
+ r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
+ &job, AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB);
+ if (r)
+ goto error_alloc;
+
+ job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
+ job->vm_needs_flush = true;
+ job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ fence = amdgpu_job_submit(job);
+ mutex_unlock(&adev->mman.gtt_window_lock);
+
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+
+ return;
+
+error_alloc:
+ mutex_unlock(&adev->mman.gtt_window_lock);
+ dev_err(adev->dev, "Error flushing GPU TLB using the SDMA (%d)!\n", r);
+}
+
+int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
+ uint32_t flush_type, bool all_hub,
+ uint32_t inst)
+{
+ struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
+ unsigned int ndw;
+ int r, cnt = 0;
+ uint32_t seq;
+
+ /*
+ * A GPU reset should flush all TLBs anyway, so no need to do
+ * this while one is ongoing.
+ */
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return 0;
+
+ if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
+ if (adev->gmc.flush_tlb_needs_extra_type_2)
+ adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
+ 2, all_hub,
+ inst);
+
+ if (adev->gmc.flush_tlb_needs_extra_type_0 && flush_type == 2)
+ adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
+ 0, all_hub,
+ inst);
+
+ adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
+ flush_type, all_hub,
+ inst);
+ r = 0;
+ } else {
+ /* 2 dwords flush + 8 dwords fence */
+ ndw = kiq->pmf->invalidate_tlbs_size + 8;
+
+ if (adev->gmc.flush_tlb_needs_extra_type_2)
+ ndw += kiq->pmf->invalidate_tlbs_size;
+
+ if (adev->gmc.flush_tlb_needs_extra_type_0)
+ ndw += kiq->pmf->invalidate_tlbs_size;
+
+ spin_lock(&adev->gfx.kiq[inst].ring_lock);
+ r = amdgpu_ring_alloc(ring, ndw);
+ if (r) {
+ spin_unlock(&adev->gfx.kiq[inst].ring_lock);
+ goto error_unlock_reset;
+ }
+ if (adev->gmc.flush_tlb_needs_extra_type_2)
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
+
+ if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
+
+ kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r) {
+ amdgpu_ring_undo(ring);
+ spin_unlock(&adev->gfx.kiq[inst].ring_lock);
+ goto error_unlock_reset;
+ }
+
+ amdgpu_ring_commit(ring);
+ spin_unlock(&adev->gfx.kiq[inst].ring_lock);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
+ !amdgpu_reset_pending(adev->reset_domain)) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY) {
+ dev_err(adev->dev, "timeout waiting for kiq fence\n");
+ r = -ETIME;
+ } else
+ r = 0;
+ }
+
+error_unlock_reset:
+ up_read(&adev->reset_domain->sem);
+ return r;
+}
+
+void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask,
+ uint32_t xcc_inst)
+{
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst];
+ struct amdgpu_ring *ring = &kiq->ring;
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+
+ if (adev->mes.ring[0].sched.ready) {
+ amdgpu_mes_reg_write_reg_wait(adev, reg0, reg1,
+ ref, mask);
+ return;
+ }
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
+ ref, mask);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* don't wait anymore for IRQ context */
+ if (r < 1 && in_interrupt())
+ goto failed_kiq;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY &&
+ !amdgpu_reset_pending(adev->reset_domain)) {
+
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq;
+
+ return;
+
+failed_undo:
+ amdgpu_ring_undo(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+failed_kiq:
+ dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
+}
+
+/**
+ * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
+ * @adev: amdgpu_device pointer
+ *
+ * Check and set if the device @adev supports Trusted Memory
+ * Zones (TMZ).
+ */
+void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ /* RAVEN */
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ /* RENOIR looks like RAVEN */
+ case IP_VERSION(9, 3, 0):
+ /* GC 10.3.7 */
+ case IP_VERSION(10, 3, 7):
+ /* GC 11.0.1 */
+ case IP_VERSION(11, 0, 1):
+ if (amdgpu_tmz == 0) {
+ adev->gmc.tmz_enabled = false;
+ dev_info(adev->dev,
+ "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n");
+ } else {
+ adev->gmc.tmz_enabled = true;
+ dev_info(adev->dev,
+ "Trusted Memory Zone (TMZ) feature enabled\n");
+ }
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 6):
+ /* VANGOGH */
+ case IP_VERSION(10, 3, 1):
+ /* YELLOW_CARP*/
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 1):
+ case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 5, 3):
+ /* Don't enable it by default yet.
+ */
+ if (amdgpu_tmz < 1) {
+ adev->gmc.tmz_enabled = false;
+ dev_info(adev->dev,
+ "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
+ } else {
+ adev->gmc.tmz_enabled = true;
+ dev_info(adev->dev,
+ "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
+ }
+ break;
+ default:
+ adev->gmc.tmz_enabled = false;
+ dev_info(adev->dev,
+ "Trusted Memory Zone (TMZ) feature not supported\n");
+ break;
+ }
+}
+
+/**
+ * amdgpu_gmc_noretry_set -- set per asic noretry defaults
+ * @adev: amdgpu_device pointer
+ *
+ * Set a per asic default for the no-retry parameter.
+ *
+ */
+void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
+{
+ struct amdgpu_gmc *gmc = &adev->gmc;
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
+ bool noretry_default = (gc_ver == IP_VERSION(9, 0, 1) ||
+ gc_ver == IP_VERSION(9, 4, 0) ||
+ gc_ver == IP_VERSION(9, 4, 1) ||
+ gc_ver == IP_VERSION(9, 4, 2) ||
+ gc_ver == IP_VERSION(9, 4, 3) ||
+ gc_ver == IP_VERSION(9, 4, 4) ||
+ gc_ver == IP_VERSION(9, 5, 0) ||
+ gc_ver >= IP_VERSION(10, 3, 0));
+
+ if (!amdgpu_sriov_xnack_support(adev))
+ gmc->noretry = 1;
+ else
+ gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry;
+}
+
+void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
+ bool enable)
+{
+ struct amdgpu_vmhub *hub;
+ u32 tmp, reg, i;
+
+ hub = &adev->vmhub[hub_type];
+ for (i = 0; i < 16; i++) {
+ reg = hub->vm_context0_cntl + hub->ctx_distance * i;
+
+ tmp = (hub_type == AMDGPU_GFXHUB(0)) ?
+ RREG32_SOC15_IP(GC, reg) :
+ RREG32_SOC15_IP(MMHUB, reg);
+
+ if (enable)
+ tmp |= hub->vm_cntx_cntl_vm_fault;
+ else
+ tmp &= ~hub->vm_cntx_cntl_vm_fault;
+
+ (hub_type == AMDGPU_GFXHUB(0)) ?
+ WREG32_SOC15_IP(GC, reg, tmp) :
+ WREG32_SOC15_IP(MMHUB, reg, tmp);
+ }
+}
+
+void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
+{
+ unsigned size;
+
+ /*
+ * Some ASICs need to reserve a region of video memory to avoid access
+ * from the driver
+ */
+ adev->mman.stolen_reserved_offset = 0;
+ adev->mman.stolen_reserved_size = 0;
+
+ /*
+ * TODO:
+ * Currently there is a bug where some memory client outside
+ * of the driver writes to the first 8M of VRAM on S3 resume.
+ * This overwrites the GART, which by default is placed in the first 8M,
+ * and causes VM_FAULTS once GTT is accessed.
+ * Keep the stolen memory reservation while this is not solved.
+ */
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ adev->mman.keep_stolen_vga_memory = true;
+ /*
+ * VEGA10 SRIOV VF with MS_HYPERV host needs some firmware reserved area.
+ */
+#ifdef CONFIG_X86
+ if (amdgpu_sriov_vf(adev) && hypervisor_is_type(X86_HYPER_MS_HYPERV)) {
+ adev->mman.stolen_reserved_offset = 0x500000;
+ adev->mman.stolen_reserved_size = 0x200000;
+ }
+#endif
+ break;
+ case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ adev->mman.keep_stolen_vga_memory = true;
+ break;
+ default:
+ adev->mman.keep_stolen_vga_memory = false;
+ break;
+ }
+
+ if (amdgpu_sriov_vf(adev) ||
+ !amdgpu_device_has_display_hardware(adev)) {
+ size = 0;
+ } else {
+ size = amdgpu_gmc_get_vbios_fb_size(adev);
+
+ if (adev->mman.keep_stolen_vga_memory)
+ size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
+ }
+
+ /* set to 0 if the pre-OS buffer uses up most of vram */
+ if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
+ size = 0;
+
+ if (size > AMDGPU_VBIOS_VGA_ALLOCATION) {
+ adev->mman.stolen_vga_size = AMDGPU_VBIOS_VGA_ALLOCATION;
+ adev->mman.stolen_extended_size = size - adev->mman.stolen_vga_size;
+ } else {
+ adev->mman.stolen_vga_size = size;
+ adev->mman.stolen_extended_size = 0;
+ }
+}
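
The helper above drops the reservation to zero when the pre-OS buffer would eat nearly all of VRAM, and otherwise splits it into a capped VGA part plus an extended remainder. A small arithmetic sketch with made-up sizes and an assumed cap:

/* Split a pre-OS framebuffer reservation; sizes and cap are made up. */
#include <stdio.h>
#include <stdint.h>

#define VGA_ALLOCATION (9 * 1024 * 1024ULL)	/* illustrative cap */

int main(void)
{
	uint64_t real_vram = 8ULL << 30;	/* 8 GiB */
	uint64_t size = 32ULL << 20;		/* pre-OS buffer: 32 MiB */
	uint64_t vga, extended;

	if ((real_vram - size) < (8ULL << 20))	/* pre-OS buffer ate VRAM */
		size = 0;

	if (size > VGA_ALLOCATION) {
		vga = VGA_ALLOCATION;
		extended = size - vga;
	} else {
		vga = size;
		extended = 0;
	}
	printf("stolen vga: %llu MiB, extended: %llu MiB\n",
	       (unsigned long long)(vga >> 20),
	       (unsigned long long)(extended >> 20));
	return 0;
}
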
+
+/**
+ * amdgpu_gmc_init_pdb0 - initialize PDB0
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This function is only used when the GART page table is used
+ * for FB address translation. In such a case, we construct
+ * a 2-level system VM page table: PDB0->PTB, to cover both
+ * the VRAM of the hive and system memory.
+ *
+ * PDB0 is static, initialized once on driver initialization.
+ * The first n entries of PDB0 are used as PTE by setting
+ * P bit to 1, pointing to VRAM. The n+1'th entry points
+ * to a big PTB covering system memory.
+ *
+ */
+void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
+{
+ int i;
+ uint64_t flags = adev->gart.gart_pte_flags; //TODO it is UC. explore NC/RW?
+ /* Each PDE0 (used as PTE) covers (2^vmid0_page_table_block_size)*2M
+ */
+ u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
+ u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21;
+ u64 vram_addr, vram_end;
+ u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
+ int idx;
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return;
+
+ flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
+ flags |= AMDGPU_PTE_WRITEABLE;
+ flags |= AMDGPU_PTE_SNOOPED;
+ flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
+ flags |= AMDGPU_PDE_PTE_FLAG(adev);
+
+ vram_addr = adev->vm_manager.vram_base_offset;
+ if (!amdgpu_virt_xgmi_migrate_enabled(adev))
+ vram_addr -= adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ vram_end = vram_addr + vram_size;
+
+ /* The first n PDE0 entries are used as PTE,
+ * pointing to vram
+ */
+ for (i = 0; vram_addr < vram_end; i++, vram_addr += pde0_page_size)
+ amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);
+
+ /* The (n+1)'th PDE0 entry points to a huge
+ * PTB which has more than 512 entries, each
+ * pointing to a 4K system page
+ */
+ flags = AMDGPU_PTE_VALID;
+ flags |= AMDGPU_PTE_SNOOPED | AMDGPU_PDE_BFS_FLAG(adev, 0);
+ /* Requires gart_ptb_gpu_pa to be 4K aligned */
+ amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
+ drm_dev_exit(idx);
+}
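
As the kernel-doc above explains, the first n PDB0 entries act as PTEs that each map (2^block_size) * 2M of VRAM, and the following entry points to the GART PTB. The sketch below just runs that sizing arithmetic for an assumed block size and hive VRAM size:

/* PDB0 sizing arithmetic; block size and VRAM size are assumptions. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int block_size = 9;	/* assumed vmid0_page_table_block_size */
	uint64_t pde0_page_size = (1ULL << block_size) << 21; /* N * 2M per entry */
	uint64_t vram_size = 64ULL << 30;	/* assumed 64 GiB hive VRAM */
	uint64_t entries = (vram_size + pde0_page_size - 1) / pde0_page_size;

	printf("each PDE0-as-PTE maps %llu MiB\n",
	       (unsigned long long)(pde0_page_size >> 20));
	printf("%llu PTE entries cover VRAM, entry %llu points at the GART PTB\n",
	       (unsigned long long)entries, (unsigned long long)entries);
	return 0;
}
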
+
+/**
+ * amdgpu_gmc_vram_mc2pa - calculate vram buffer's physical address from MC
+ * address
+ *
+ * @adev: amdgpu_device pointer
+ * @mc_addr: MC address of buffer
+ */
+uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr)
+{
+ return mc_addr - adev->gmc.vram_start + adev->vm_manager.vram_base_offset;
+}
+
+/**
+ * amdgpu_gmc_vram_pa - calculate vram buffer object's physical address from
+ * GPU's view
+ *
+ * @adev: amdgpu_device pointer
+ * @bo: amdgpu buffer object
+ */
+uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo)
+{
+ return amdgpu_gmc_vram_mc2pa(adev, amdgpu_bo_gpu_offset(bo));
+}
+
+int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
+{
+ struct amdgpu_bo *vram_bo = NULL;
+ uint64_t vram_gpu = 0;
+ void *vram_ptr = NULL;
+
+ int ret, size = 0x100000;
+ uint8_t cptr[10];
+
+ ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &vram_bo,
+ &vram_gpu,
+ &vram_ptr);
+ if (ret)
+ return ret;
+
+ memset(vram_ptr, 0x86, size);
+ memset(cptr, 0x86, 10);
+
+ /*
+ * Check the start, the middle, and the end of the memory to see whether
+ * each sampled byte contains the pattern 0x86. If so, assume the vram bo
+ * is workable.
+ *
+ * Note: checking every byte of the whole 1M bo would take too many
+ * seconds, so only three regions are sampled.
+ */
+ ret = memcmp(vram_ptr, cptr, 10);
+ if (ret) {
+ ret = -EIO;
+ goto release_buffer;
+ }
+
+ ret = memcmp(vram_ptr + (size / 2), cptr, 10);
+ if (ret) {
+ ret = -EIO;
+ goto release_buffer;
+ }
+
+ ret = memcmp(vram_ptr + size - 10, cptr, 10);
+ if (ret) {
+ ret = -EIO;
+ goto release_buffer;
+ }
+
+release_buffer:
+ amdgpu_bo_free_kernel(&vram_bo, &vram_gpu,
+ &vram_ptr);
+
+ return ret;
+}
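
The VRAM check above deliberately samples only the start, middle and end of the 1M buffer instead of comparing every byte. The same sampling check, reduced to a plain host-side program:

/* Sample-based pattern check of a buffer: start, middle, end only. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t size = 0x100000;
	unsigned char pattern[10], *buf = malloc(size);

	if (!buf)
		return 1;
	memset(buf, 0x86, size);
	memset(pattern, 0x86, sizeof(pattern));

	if (memcmp(buf, pattern, sizeof(pattern)) ||
	    memcmp(buf + size / 2, pattern, sizeof(pattern)) ||
	    memcmp(buf + size - sizeof(pattern), pattern, sizeof(pattern))) {
		fprintf(stderr, "pattern mismatch\n");
		free(buf);
		return 1;
	}
	puts("buffer looks sane");
	free(buf);
	return 0;
}
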
+
+static const char *nps_desc[] = {
+ [AMDGPU_NPS1_PARTITION_MODE] = "NPS1",
+ [AMDGPU_NPS2_PARTITION_MODE] = "NPS2",
+ [AMDGPU_NPS3_PARTITION_MODE] = "NPS3",
+ [AMDGPU_NPS4_PARTITION_MODE] = "NPS4",
+ [AMDGPU_NPS6_PARTITION_MODE] = "NPS6",
+ [AMDGPU_NPS8_PARTITION_MODE] = "NPS8",
+};
+
+static ssize_t available_memory_partition_show(struct device *dev,
+ struct device_attribute *addr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int size = 0, mode;
+ char *sep = "";
+
+ for_each_inst(mode, adev->gmc.supported_nps_modes) {
+ size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]);
+ sep = ", ";
+ }
+ size += sysfs_emit_at(buf, size, "\n");
+
+ return size;
+}
+
+static ssize_t current_memory_partition_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ enum amdgpu_memory_partition mode;
+ struct amdgpu_hive_info *hive;
+ int i;
+
+ mode = UNKNOWN_MEMORY_PARTITION_MODE;
+ for_each_inst(i, adev->gmc.supported_nps_modes) {
+ if (!strncasecmp(nps_desc[i], buf, strlen(nps_desc[i]))) {
+ mode = i;
+ break;
+ }
+ }
+
+ if (mode == UNKNOWN_MEMORY_PARTITION_MODE)
+ return -EINVAL;
+
+ if (mode == adev->gmc.gmc_funcs->query_mem_partition_mode(adev)) {
+ dev_info(
+ adev->dev,
+ "requested NPS mode is same as current NPS mode, skipping\n");
+ return count;
+ }
+
+ /* If device is part of hive, all devices in the hive should request the
+ * same mode. Hence store the requested mode in hive.
+ */
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ atomic_set(&hive->requested_nps_mode, mode);
+ amdgpu_put_xgmi_hive(hive);
+ } else {
+ adev->gmc.requested_nps_mode = mode;
+ }
+
+ dev_info(
+ adev->dev,
+ "NPS mode change requested, please remove and reload the driver\n");
+
+ return count;
+}
+
+static ssize_t current_memory_partition_show(
+ struct device *dev, struct device_attribute *addr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ enum amdgpu_memory_partition mode;
+
+ /* Only minimal precaution taken to reject requests while in reset */
+ if (amdgpu_in_reset(adev))
+ return -EPERM;
+
+ mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ if ((mode >= ARRAY_SIZE(nps_desc)) ||
+ (BIT(mode) & AMDGPU_ALL_NPS_MASK) != BIT(mode))
+ return sysfs_emit(buf, "UNKNOWN\n");
+
+ return sysfs_emit(buf, "%s\n", nps_desc[mode]);
+}
+
+static DEVICE_ATTR_RW(current_memory_partition);
+static DEVICE_ATTR_RO(available_memory_partition);
+
+int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev)
+{
+ bool nps_switch_support;
+ int r = 0;
+
+ if (!adev->gmc.gmc_funcs->query_mem_partition_mode)
+ return 0;
+
+ nps_switch_support = (hweight32(adev->gmc.supported_nps_modes &
+ AMDGPU_ALL_NPS_MASK) > 1);
+ if (!nps_switch_support)
+ dev_attr_current_memory_partition.attr.mode &=
+ ~(S_IWUSR | S_IWGRP | S_IWOTH);
+ else
+ r = device_create_file(adev->dev,
+ &dev_attr_available_memory_partition);
+
+ if (r)
+ return r;
+
+ return device_create_file(adev->dev,
+ &dev_attr_current_memory_partition);
+}
+
+void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (!adev->gmc.gmc_funcs->query_mem_partition_mode)
+ return;
+
+ device_remove_file(adev->dev, &dev_attr_current_memory_partition);
+ device_remove_file(adev->dev, &dev_attr_available_memory_partition);
+}
+
+int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges,
+ uint8_t *exp_ranges)
+{
+ struct amdgpu_gmc_memrange *ranges;
+ int range_cnt, ret, i, j;
+ uint32_t nps_type;
+ bool refresh;
+
+ if (!mem_ranges || !exp_ranges)
+ return -EINVAL;
+
+ refresh = (adev->init_lvl->level != AMDGPU_INIT_LEVEL_MINIMAL_XGMI) &&
+ (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS);
+ ret = amdgpu_discovery_get_nps_info(adev, &nps_type, &ranges,
+ &range_cnt, refresh);
+
+ if (ret)
+ return ret;
+
+ /* TODO: For now, expect ranges and partition count to be the same.
+ * Adjust if there are holes expected in any NPS domain.
+ */
+ if (*exp_ranges && (range_cnt != *exp_ranges)) {
+ dev_warn(
+ adev->dev,
+ "NPS config mismatch - expected ranges: %d discovery - nps mode: %d, nps ranges: %d",
+ *exp_ranges, nps_type, range_cnt);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ for (i = 0; i < range_cnt; ++i) {
+ if (ranges[i].base_address >= ranges[i].limit_address) {
+ dev_warn(
+ adev->dev,
+ "Invalid NPS range - nps mode: %d, range[%d]: base: %llx limit: %llx",
+ nps_type, i, ranges[i].base_address,
+ ranges[i].limit_address);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Check for overlaps, not expecting any now */
+ for (j = i - 1; j >= 0; j--) {
+ if (max(ranges[j].base_address,
+ ranges[i].base_address) <=
+ min(ranges[j].limit_address,
+ ranges[i].limit_address)) {
+ dev_warn(
+ adev->dev,
+ "overlapping ranges detected [ %llx - %llx ] | [%llx - %llx]",
+ ranges[j].base_address,
+ ranges[j].limit_address,
+ ranges[i].base_address,
+ ranges[i].limit_address);
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ mem_ranges[i].range.fpfn =
+ (ranges[i].base_address -
+ adev->vm_manager.vram_base_offset) >>
+ AMDGPU_GPU_PAGE_SHIFT;
+ mem_ranges[i].range.lpfn =
+ (ranges[i].limit_address -
+ adev->vm_manager.vram_base_offset) >>
+ AMDGPU_GPU_PAGE_SHIFT;
+ mem_ranges[i].size =
+ ranges[i].limit_address - ranges[i].base_address + 1;
+ }
+
+ if (!*exp_ranges)
+ *exp_ranges = range_cnt;
+err:
+ kfree(ranges);
+
+ return ret;
+}
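
The range validation above relies on the standard closed-interval test: two ranges overlap exactly when max(base_a, base_b) <= min(limit_a, limit_b). A tiny standalone version of that test with made-up ranges:

/* Closed-interval overlap test, as used for the NPS range sanity check. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct range { uint64_t base, limit; };

static bool ranges_overlap(struct range a, struct range b)
{
	uint64_t lo = a.base > b.base ? a.base : b.base;
	uint64_t hi = a.limit < b.limit ? a.limit : b.limit;

	return lo <= hi;
}

int main(void)
{
	struct range r0 = { 0x0,        0x3fffffff };
	struct range r1 = { 0x40000000, 0x7fffffff };
	struct range r2 = { 0x30000000, 0x5fffffff };

	printf("r0/r1 overlap: %d\n", ranges_overlap(r0, r1)); /* 0 */
	printf("r1/r2 overlap: %d\n", ranges_overlap(r1, r2)); /* 1 */
	return 0;
}
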
+
+int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev,
+ int nps_mode)
+{
+ /* Not supported on VF devices and APUs */
+ if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
+ return -EOPNOTSUPP;
+
+ if (!adev->psp.funcs) {
+ dev_err(adev->dev,
+ "PSP interface not available for nps mode change request");
+ return -EINVAL;
+ }
+
+ return psp_memory_partition(&adev->psp, nps_mode);
+}
+
+static inline bool amdgpu_gmc_need_nps_switch_req(struct amdgpu_device *adev,
+ int req_nps_mode,
+ int cur_nps_mode)
+{
+ return (((BIT(req_nps_mode) & adev->gmc.supported_nps_modes) ==
+ BIT(req_nps_mode)) &&
+ req_nps_mode != cur_nps_mode);
+}
+
+void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev)
+{
+ int req_nps_mode, cur_nps_mode, r;
+ struct amdgpu_hive_info *hive;
+
+ if (amdgpu_sriov_vf(adev) || !adev->gmc.supported_nps_modes ||
+ !adev->gmc.gmc_funcs->request_mem_partition_mode)
+ return;
+
+ cur_nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive) {
+ req_nps_mode = atomic_read(&hive->requested_nps_mode);
+ if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode,
+ cur_nps_mode)) {
+ amdgpu_put_xgmi_hive(hive);
+ return;
+ }
+ r = amdgpu_xgmi_request_nps_change(adev, hive, req_nps_mode);
+ amdgpu_put_xgmi_hive(hive);
+ goto out;
+ }
+
+ req_nps_mode = adev->gmc.requested_nps_mode;
+ if (!amdgpu_gmc_need_nps_switch_req(adev, req_nps_mode, cur_nps_mode))
+ return;
+
+ /* even if this fails, we should let driver unload w/o blocking */
+ r = adev->gmc.gmc_funcs->request_mem_partition_mode(adev, req_nps_mode);
+out:
+ if (r)
+ dev_err(adev->dev, "NPS mode change request failed\n");
+ else
+ dev_info(
+ adev->dev,
+ "NPS mode change request done, reload driver to complete the change\n");
+}
+
+bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev)
+{
+ if (adev->gmc.gmc_funcs->need_reset_on_init)
+ return adev->gmc.gmc_funcs->need_reset_on_init(adev);
+
+ return false;
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev)
+{
+ switch (adev->gmc.num_mem_partitions) {
+ case 0:
+ return UNKNOWN_MEMORY_PARTITION_MODE;
+ case 1:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ case 2:
+ return AMDGPU_NPS2_PARTITION_MODE;
+ case 4:
+ return AMDGPU_NPS4_PARTITION_MODE;
+ case 8:
+ return AMDGPU_NPS8_PARTITION_MODE;
+ default:
+ return AMDGPU_NPS1_PARTITION_MODE;
+ }
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes)
+{
+ enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE;
+
+ if (adev->nbio.funcs &&
+ adev->nbio.funcs->get_memory_partition_mode)
+ mode = adev->nbio.funcs->get_memory_partition_mode(adev,
+ supp_modes);
+ else
+ dev_warn(adev->dev, "memory partition mode query is not supported\n");
+
+ return mode;
+}
+
+enum amdgpu_memory_partition
+amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return amdgpu_gmc_get_vf_memory_partition(adev);
+ else
+ return amdgpu_gmc_get_memory_partition(adev, NULL);
+}
+
+static bool amdgpu_gmc_validate_partition_info(struct amdgpu_device *adev)
+{
+ enum amdgpu_memory_partition mode;
+ u32 supp_modes;
+ bool valid;
+
+ mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes);
+
+ /* Mode detected by hardware not present in supported modes */
+ if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) &&
+ !(BIT(mode - 1) & supp_modes))
+ return false;
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ case AMDGPU_NPS1_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 1);
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 2);
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 3 ||
+ adev->gmc.num_mem_partitions == 4);
+ break;
+ case AMDGPU_NPS8_PARTITION_MODE:
+ valid = (adev->gmc.num_mem_partitions == 8);
+ break;
+ default:
+ valid = false;
+ }
+
+ return valid;
+}
+
+static bool amdgpu_gmc_is_node_present(int *node_ids, int num_ids, int nid)
+{
+ int i;
+
+ /* Check if node with id 'nid' is present in 'node_ids' array */
+ for (i = 0; i < num_ids; ++i)
+ if (node_ids[i] == nid)
+ return true;
+
+ return false;
+}
+
+static void
+amdgpu_gmc_init_acpi_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ struct amdgpu_numa_info numa_info;
+ int node_ids[AMDGPU_MAX_MEM_RANGES];
+ int num_ranges = 0, ret;
+ int num_xcc, xcc_id;
+ uint32_t xcc_mask;
+
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ xcc_mask = (1U << num_xcc) - 1;
+
+ for_each_inst(xcc_id, xcc_mask) {
+ ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
+ if (ret)
+ continue;
+
+ if (numa_info.nid == NUMA_NO_NODE) {
+ mem_ranges[0].size = numa_info.size;
+ mem_ranges[0].numa.node = numa_info.nid;
+ num_ranges = 1;
+ break;
+ }
+
+ if (amdgpu_gmc_is_node_present(node_ids, num_ranges,
+ numa_info.nid))
+ continue;
+
+ node_ids[num_ranges] = numa_info.nid;
+ mem_ranges[num_ranges].numa.node = numa_info.nid;
+ mem_ranges[num_ranges].size = numa_info.size;
+ ++num_ranges;
+ }
+
+ adev->gmc.num_mem_partitions = num_ranges;
+}
+
+void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges)
+{
+ enum amdgpu_memory_partition mode;
+ u32 start_addr = 0, size;
+ int i, r, l;
+
+ mode = amdgpu_gmc_query_memory_partition(adev);
+
+ switch (mode) {
+ case UNKNOWN_MEMORY_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 0;
+ break;
+ case AMDGPU_NPS1_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ case AMDGPU_NPS2_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 2;
+ break;
+ case AMDGPU_NPS4_PARTITION_MODE:
+ if (adev->flags & AMD_IS_APU)
+ adev->gmc.num_mem_partitions = 3;
+ else
+ adev->gmc.num_mem_partitions = 4;
+ break;
+ case AMDGPU_NPS8_PARTITION_MODE:
+ adev->gmc.num_mem_partitions = 8;
+ break;
+ default:
+ adev->gmc.num_mem_partitions = 1;
+ break;
+ }
+
+ /* Use NPS range info, if populated */
+ r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges,
+ &adev->gmc.num_mem_partitions);
+ if (!r) {
+ l = 0;
+ for (i = 1; i < adev->gmc.num_mem_partitions; ++i) {
+ if (mem_ranges[i].range.lpfn >
+ mem_ranges[i - 1].range.lpfn)
+ l = i;
+ }
+
+ } else {
+ if (!adev->gmc.num_mem_partitions) {
+ dev_warn(adev->dev,
+ "Not able to detect NPS mode, fall back to NPS1\n");
+ adev->gmc.num_mem_partitions = 1;
+ }
+ /* Fallback to sw based calculation */
+ size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT;
+ size /= adev->gmc.num_mem_partitions;
+
+ for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
+ mem_ranges[i].range.fpfn = start_addr;
+ mem_ranges[i].size =
+ ((u64)size << AMDGPU_GPU_PAGE_SHIFT);
+ mem_ranges[i].range.lpfn = start_addr + size - 1;
+ start_addr += size;
+ }
+
+ l = adev->gmc.num_mem_partitions - 1;
+ }
+
+ /* Adjust the last one */
+ mem_ranges[l].range.lpfn =
+ (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1;
+ mem_ranges[l].size =
+ adev->gmc.real_vram_size -
+ ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT);
+}
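
When NPS range info is unavailable the function falls back to slicing VRAM into equal page-sized chunks and then stretching the last partition to the real end of VRAM. A sketch of that fallback arithmetic with an assumed VRAM size and partition count:

/* Equal split of VRAM into N partitions; the last one absorbs the remainder. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t real_vram = (16ULL << 30) - (4 << 20);	/* 16 GiB minus a bit */
	unsigned int parts = 4, i;
	uint64_t pages = (real_vram + (16 << 20)) >> PAGE_SHIFT;
	uint64_t size = pages / parts, start = 0;

	for (i = 0; i < parts; i++) {
		uint64_t fpfn = start, lpfn = start + size - 1;

		if (i == parts - 1)	/* stretch last range to end of VRAM */
			lpfn = (real_vram >> PAGE_SHIFT) - 1;
		printf("part %u: pfn %llu..%llu\n", i,
		       (unsigned long long)fpfn, (unsigned long long)lpfn);
		start += size;
	}
	return 0;
}
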
+
+int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev)
+{
+ bool valid;
+
+ adev->gmc.mem_partitions = kcalloc(AMDGPU_MAX_MEM_RANGES,
+ sizeof(struct amdgpu_mem_partition_info),
+ GFP_KERNEL);
+ if (!adev->gmc.mem_partitions)
+ return -ENOMEM;
+
+ if (adev->gmc.is_app_apu)
+ amdgpu_gmc_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions);
+ else
+ amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions);
+
+ if (amdgpu_sriov_vf(adev))
+ valid = true;
+ else
+ valid = amdgpu_gmc_validate_partition_info(adev);
+ if (!valid) {
+ /* TODO: handle invalid case */
+ dev_warn(adev->dev,
+ "Mem ranges not matching with hardware config\n");
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
new file mode 100644
index 000000000000..55097ca10738
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -0,0 +1,471 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+#ifndef __AMDGPU_GMC_H__
+#define __AMDGPU_GMC_H__
+
+#include <linux/types.h>
+
+#include "amdgpu_irq.h"
+#include "amdgpu_xgmi.h"
+#include "amdgpu_ras.h"
+
+/* VA hole for 48bit addresses on Vega10 */
+#define AMDGPU_GMC_HOLE_START 0x0000800000000000ULL
+#define AMDGPU_GMC_HOLE_END 0xffff800000000000ULL
+
+/*
+ * Hardware is programmed as if the hole doesn't exist, using start and end
+ * address values.
+ *
+ * This mask is used to remove the upper 16 bits of the VA and so come up with
+ * the linear addr value.
+ */
+#define AMDGPU_GMC_HOLE_MASK 0x0000ffffffffffffULL
+
+/*
+ * Ring size as power of two for the log of recent faults.
+ */
+#define AMDGPU_GMC_FAULT_RING_ORDER 8
+#define AMDGPU_GMC_FAULT_RING_SIZE (1 << AMDGPU_GMC_FAULT_RING_ORDER)
+
+/*
+ * Hash size as power of two for the log of recent faults
+ */
+#define AMDGPU_GMC_FAULT_HASH_ORDER 8
+#define AMDGPU_GMC_FAULT_HASH_SIZE (1 << AMDGPU_GMC_FAULT_HASH_ORDER)
+
+/*
+ * Number of IH timestamp ticks until a fault is considered handled
+ */
+#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL
+
+/* XNACK flags */
+#define AMDGPU_GMC_XNACK_FLAG_CHAIN BIT(0)
+
+struct firmware;
+
+enum amdgpu_memory_partition {
+ UNKNOWN_MEMORY_PARTITION_MODE = 0,
+ AMDGPU_NPS1_PARTITION_MODE = 1,
+ AMDGPU_NPS2_PARTITION_MODE = 2,
+ AMDGPU_NPS3_PARTITION_MODE = 3,
+ AMDGPU_NPS4_PARTITION_MODE = 4,
+ AMDGPU_NPS6_PARTITION_MODE = 6,
+ AMDGPU_NPS8_PARTITION_MODE = 8,
+};
+
+#define AMDGPU_ALL_NPS_MASK \
+ (BIT(AMDGPU_NPS1_PARTITION_MODE) | BIT(AMDGPU_NPS2_PARTITION_MODE) | \
+ BIT(AMDGPU_NPS3_PARTITION_MODE) | BIT(AMDGPU_NPS4_PARTITION_MODE) | \
+ BIT(AMDGPU_NPS6_PARTITION_MODE) | BIT(AMDGPU_NPS8_PARTITION_MODE))
+
+#define AMDGPU_GMC_INIT_RESET_NPS BIT(0)
+
+#define AMDGPU_MAX_MEM_RANGES 8
+
+/*
+ * GMC page fault information
+ */
+struct amdgpu_gmc_fault {
+ uint64_t timestamp:48;
+ uint64_t next:AMDGPU_GMC_FAULT_RING_ORDER;
+ atomic64_t key;
+ uint64_t timestamp_expiry:48;
+};
+
+/*
+ * VMHUB structures, functions & helpers
+ */
+struct amdgpu_vmhub_funcs {
+ void (*print_l2_protection_fault_status)(struct amdgpu_device *adev,
+ uint32_t status);
+ uint32_t (*get_invalidate_req)(unsigned int vmid, uint32_t flush_type);
+};
+
+struct amdgpu_vmhub {
+ uint32_t ctx0_ptb_addr_lo32;
+ uint32_t ctx0_ptb_addr_hi32;
+ uint32_t vm_inv_eng0_sem;
+ uint32_t vm_inv_eng0_req;
+ uint32_t vm_inv_eng0_ack;
+ uint32_t vm_context0_cntl;
+ uint32_t vm_l2_pro_fault_status;
+ uint32_t vm_l2_pro_fault_cntl;
+
+ /*
+ * store the register distance between two consecutive context domains
+ * and between two invalidation engines.
+ */
+ uint32_t ctx_distance;
+ uint32_t ctx_addr_distance; /* include LO32/HI32 */
+ uint32_t eng_distance;
+ uint32_t eng_addr_distance; /* include LO32/HI32 */
+
+ uint32_t vm_cntx_cntl;
+ uint32_t vm_cntx_cntl_vm_fault;
+ uint32_t vm_l2_bank_select_reserved_cid2;
+
+ uint32_t vm_contexts_disable;
+
+ bool sdma_invalidation_workaround;
+
+ const struct amdgpu_vmhub_funcs *vmhub_funcs;
+};
+
+/*
+ * GPU MC structures, functions & helpers
+ */
+struct amdgpu_gmc_funcs {
+ /* flush the vm tlb via mmio */
+ void (*flush_gpu_tlb)(struct amdgpu_device *adev, uint32_t vmid,
+ uint32_t vmhub, uint32_t flush_type);
+ /* flush the vm tlb via pasid */
+ void (*flush_gpu_tlb_pasid)(struct amdgpu_device *adev, uint16_t pasid,
+ uint32_t flush_type, bool all_hub,
+ uint32_t inst);
+ /* flush the vm tlb via ring */
+ uint64_t (*emit_flush_gpu_tlb)(struct amdgpu_ring *ring, unsigned vmid,
+ uint64_t pd_addr);
+ /* Change the VMID -> PASID mapping */
+ void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
+ unsigned pasid);
+ /* enable/disable PRT support */
+ void (*set_prt)(struct amdgpu_device *adev, bool enable);
+ /* get the pde for a given mc addr */
+ void (*get_vm_pde)(struct amdgpu_device *adev, int level,
+ u64 *dst, u64 *flags);
+ /* get the pte flags to use for PTEs */
+ void (*get_vm_pte)(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo,
+ uint32_t vm_flags,
+ uint64_t *pte_flags);
+ /* override per-page pte flags */
+ void (*override_vm_pte_flags)(struct amdgpu_device *dev,
+ struct amdgpu_vm *vm,
+ uint64_t addr, uint64_t *flags);
+ /* get the amount of memory used by the vbios for pre-OS console */
+ unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev);
+ /* get the DCC buffer alignment */
+ unsigned int (*get_dcc_alignment)(struct amdgpu_device *adev);
+
+ enum amdgpu_memory_partition (*query_mem_partition_mode)(
+ struct amdgpu_device *adev);
+ /* Request NPS mode */
+ int (*request_mem_partition_mode)(struct amdgpu_device *adev,
+ int nps_mode);
+ bool (*need_reset_on_init)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_mem_partition_info {
+ union {
+ struct {
+ uint32_t fpfn;
+ uint32_t lpfn;
+ } range;
+ struct {
+ int node;
+ } numa;
+ };
+ uint64_t size;
+};
+
+#define INVALID_PFN -1
+
+struct amdgpu_gmc_memrange {
+ uint64_t base_address;
+ uint64_t limit_address;
+ uint32_t flags;
+ int nid_mask;
+};
+
+enum amdgpu_gart_placement {
+ AMDGPU_GART_PLACEMENT_BEST_FIT = 0,
+ AMDGPU_GART_PLACEMENT_HIGH,
+ AMDGPU_GART_PLACEMENT_LOW,
+};
+
+struct amdgpu_gmc {
+ /* FB's physical address in MMIO space (for CPU to
+ * map FB). This is different compared to the agp/
+ * gart/vram_start/end fields as the latter are from the
+ * GPU's view while aper_base is from the CPU's view.
+ */
+ resource_size_t aper_size;
+ resource_size_t aper_base;
+ /* for some chips with <= 32MB we need to lie
+ * about vram size near mc fb location */
+ u64 mc_vram_size;
+ u64 visible_vram_size;
+ /* AGP aperture start and end in MC address space
+ * The driver finds a hole in the MC address space
+ * to place AGP by setting the MC_VM_AGP_BOT/TOP registers.
+ * Under VMID0, logical address == MC address. The AGP
+ * aperture maps to physical bus or IOVA addresses.
+ * The AGP aperture is used to simulate FB in the ZFB case.
+ * The AGP aperture is also used for page tables in system
+ * memory (mainly for APUs).
+ *
+ */
+ u64 agp_size;
+ u64 agp_start;
+ u64 agp_end;
+ /* GART aperture start and end in MC address space
+ * The driver finds a hole in the MC address space
+ * to place GART by setting the VM_CONTEXT0_PAGE_TABLE_START/END_ADDR
+ * registers.
+ * Under VMID0, logical addresses inside the GART aperture are
+ * translated through the GPUVM GART page table to access
+ * paged system memory.
+ */
+ u64 gart_size;
+ u64 gart_start;
+ u64 gart_end;
+ /* Frame buffer aperture of this GPU device. Different from
+ * fb_start (see below), this only covers the local GPU device.
+ * If the driver uses the FB aperture to access FB, it gets fb_start from
+ * MC_VM_FB_LOCATION_BASE (set by the vbios) and calculates vram_start
+ * of this local device by adding an offset inside the XGMI hive.
+ * If the driver uses the GART table for VMID0 FB access, it finds a hole in
+ * VMID0's virtual address space to place the SYSVM aperture, inside
+ * which the first part is vram and the second part is gart (covering
+ * system ram).
+ */
+ u64 vram_start;
+ u64 vram_end;
+ /* FB region. It is the same as the local vram region on a single GPU.
+ * In an XGMI configuration, this region covers all GPUs in the same hive,
+ * and each GPU in the hive has the same view of this FB region.
+ * GPU0's vram starts at offset (0 * segment size),
+ * GPU1 starts at offset (1 * segment size), etc.
+ */
+ u64 fb_start;
+ u64 fb_end;
+ unsigned vram_width;
+ u64 real_vram_size;
+ int vram_mtrr;
+ u64 mc_mask;
+ const struct firmware *fw; /* MC firmware */
+ uint32_t fw_version;
+ struct amdgpu_irq_src vm_fault;
+ uint32_t vram_type;
+ uint8_t vram_vendor;
+ uint32_t srbm_soft_reset;
+ bool prt_warning;
+ uint32_t sdpif_register;
+ /* apertures */
+ u64 shared_aperture_start;
+ u64 shared_aperture_end;
+ u64 private_aperture_start;
+ u64 private_aperture_end;
+ /* protects concurrent invalidation */
+ spinlock_t invalidate_lock;
+ bool translate_further;
+ struct kfd_vm_fault_info *vm_fault_info;
+ atomic_t vm_fault_info_updated;
+
+ struct amdgpu_gmc_fault fault_ring[AMDGPU_GMC_FAULT_RING_SIZE];
+ struct {
+ uint64_t idx:AMDGPU_GMC_FAULT_RING_ORDER;
+ } fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE];
+ uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER;
+
+ bool tmz_enabled;
+ bool is_app_apu;
+
+ struct amdgpu_mem_partition_info *mem_partitions;
+ uint8_t num_mem_partitions;
+ const struct amdgpu_gmc_funcs *gmc_funcs;
+ enum amdgpu_memory_partition requested_nps_mode;
+ uint32_t supported_nps_modes;
+ uint32_t reset_flags;
+
+ struct amdgpu_xgmi xgmi;
+ struct amdgpu_irq_src ecc_irq;
+ int noretry;
+ uint32_t xnack_flags;
+
+ uint32_t vmid0_page_table_block_size;
+ uint32_t vmid0_page_table_depth;
+ struct amdgpu_bo *pdb0_bo;
+ /* CPU kmapped address of pdb0 */
+ void *ptr_pdb0;
+
+ /* MALL size */
+ u64 mall_size;
+ uint32_t m_half_use;
+
+ /* number of UMC instances */
+ int num_umc;
+ /* mode2 save restore */
+ u64 VM_L2_CNTL;
+ u64 VM_L2_CNTL2;
+ u64 VM_DUMMY_PAGE_FAULT_CNTL;
+ u64 VM_DUMMY_PAGE_FAULT_ADDR_LO32;
+ u64 VM_DUMMY_PAGE_FAULT_ADDR_HI32;
+ u64 VM_L2_PROTECTION_FAULT_CNTL;
+ u64 VM_L2_PROTECTION_FAULT_CNTL2;
+ u64 VM_L2_PROTECTION_FAULT_MM_CNTL3;
+ u64 VM_L2_PROTECTION_FAULT_MM_CNTL4;
+ u64 VM_L2_PROTECTION_FAULT_ADDR_LO32;
+ u64 VM_L2_PROTECTION_FAULT_ADDR_HI32;
+ u64 VM_DEBUG;
+ u64 VM_L2_MM_GROUP_RT_CLASSES;
+ u64 VM_L2_BANK_SELECT_RESERVED_CID;
+ u64 VM_L2_BANK_SELECT_RESERVED_CID2;
+ u64 VM_L2_CACHE_PARITY_CNTL;
+ u64 VM_L2_IH_LOG_CNTL;
+ u64 VM_CONTEXT_CNTL[16];
+ u64 VM_CONTEXT_PAGE_TABLE_BASE_ADDR_LO32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_BASE_ADDR_HI32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_START_ADDR_LO32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_START_ADDR_HI32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_LO32[16];
+ u64 VM_CONTEXT_PAGE_TABLE_END_ADDR_HI32[16];
+ u64 MC_VM_MX_L1_TLB_CNTL;
+
+ u64 noretry_flags;
+
+ bool flush_tlb_needs_extra_type_0;
+ bool flush_tlb_needs_extra_type_2;
+ bool flush_pasid_uses_kiq;
+};
+
+#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
+#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
+#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_gmc_get_vm_pte(adev, vm, bo, vm_flags, pte_flags) \
+ ((adev)->gmc.gmc_funcs->get_vm_pte((adev), (vm), (bo), (vm_flags), \
+ (pte_flags)))
+#define amdgpu_gmc_override_vm_pte_flags(adev, vm, addr, pte_flags) \
+ (adev)->gmc.gmc_funcs->override_vm_pte_flags \
+ ((adev), (vm), (addr), (pte_flags))
+#define amdgpu_gmc_get_vbios_fb_size(adev) (adev)->gmc.gmc_funcs->get_vbios_fb_size((adev))
+#define amdgpu_gmc_get_dcc_alignment(adev) ({ \
+ typeof(adev) _adev = (adev); \
+ _adev->gmc.gmc_funcs->get_dcc_alignment(_adev); \
+})
+
+/**
+ * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
+ *
+ * @gmc: amdgpu_gmc pointer
+ *
+ * Returns:
+ * True if full VRAM is visible through the BAR
+ */
+static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
+{
+ WARN_ON(gmc->real_vram_size < gmc->visible_vram_size);
+
+ return (gmc->real_vram_size == gmc->visible_vram_size);
+}
+
+/**
+ * amdgpu_gmc_sign_extend - sign extend the given gmc address
+ *
+ * @addr: address to extend
+ */
+static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
+{
+ if (addr >= AMDGPU_GMC_HOLE_START)
+ addr |= AMDGPU_GMC_HOLE_END;
+
+ return addr;
+}
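
The helper above moves any 48-bit address at or above the hole start into the upper canonical half by OR-ing in the top bits. A quick demonstration with two sample addresses; the constants mirror the defines earlier in this header:

/* Sign-extension across the 48-bit VA hole; constants mirror the header. */
#include <stdio.h>
#include <stdint.h>

#define HOLE_START 0x0000800000000000ULL
#define HOLE_END   0xffff800000000000ULL

static uint64_t sign_extend(uint64_t addr)
{
	return addr >= HOLE_START ? addr | HOLE_END : addr;
}

int main(void)
{
	printf("%016llx\n", (unsigned long long)sign_extend(0x00007fffffff0000ULL));
	printf("%016llx\n", (unsigned long long)sign_extend(0x0000800000001000ULL));
	return 0;
}
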
+
+bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev);
+int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev);
+void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
+ uint64_t *addr, uint64_t *flags);
+int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+ uint32_t gpu_page_idx, uint64_t addr,
+ uint64_t flags);
+uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
+uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
+void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc);
+void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
+ u64 base);
+void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
+ struct amdgpu_gmc *mc,
+ enum amdgpu_gart_placement gart_placement);
+void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
+ struct amdgpu_gmc *mc);
+void amdgpu_gmc_set_agp_default(struct amdgpu_device *adev,
+ struct amdgpu_gmc *mc);
+bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih, uint64_t addr,
+ uint16_t pasid, uint64_t timestamp);
+void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr,
+ uint16_t pasid);
+int amdgpu_gmc_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
+void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
+int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
+void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
+ uint32_t vmhub, uint32_t flush_type);
+int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
+ uint32_t flush_type, bool all_hub,
+ uint32_t inst);
+void amdgpu_gmc_fw_reg_write_reg_wait(struct amdgpu_device *adev,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask,
+ uint32_t xcc_inst);
+
+extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
+extern void amdgpu_gmc_noretry_set(struct amdgpu_device *adev);
+
+extern void
+amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
+ bool enable);
+
+void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);
+
+void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev);
+uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr);
+uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+int amdgpu_gmc_vram_checking(struct amdgpu_device *adev);
+int amdgpu_gmc_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_gmc_sysfs_fini(struct amdgpu_device *adev);
+
+int amdgpu_gmc_get_nps_memranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges,
+ uint8_t *exp_ranges);
+
+int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev,
+ int nps_mode);
+void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev);
+bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev);
+enum amdgpu_memory_partition
+amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev);
+enum amdgpu_memory_partition
+amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes);
+enum amdgpu_memory_partition
+amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev);
+int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev);
+void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev,
+ struct amdgpu_mem_partition_info *mem_ranges);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f7d22c44034d..0760e70402ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -22,182 +22,138 @@
* Authors: Christian König
*/
-#include <drm/drmP.h>
+#include <drm/ttm/ttm_range_manager.h>
+
#include "amdgpu.h"
-struct amdgpu_gtt_mgr {
- struct drm_mm mm;
- spinlock_t lock;
- uint64_t available;
-};
+static inline struct amdgpu_gtt_mgr *
+to_gtt_mgr(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct amdgpu_gtt_mgr, manager);
+}
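
to_gtt_mgr() is the usual container_of() downcast from the embedded ttm_resource_manager back to the wrapping amdgpu_gtt_mgr. The same pattern in isolation, with stand-in types and a local copy of the macro:

/* container_of() downcast pattern used by to_gtt_mgr(); types are stand-ins. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct manager { int size; };
struct gtt_mgr {
	struct manager manager;		/* embedded base object */
	int extra;
};

int main(void)
{
	struct gtt_mgr mgr = { .manager = { .size = 42 }, .extra = 7 };
	struct manager *man = &mgr.manager;
	struct gtt_mgr *back = container_of(man, struct gtt_mgr, manager);

	printf("%d %d\n", back->manager.size, back->extra);
	return 0;
}
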
/**
- * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
- *
- * @man: TTM memory type manager
- * @p_size: maximum size of GTT
+ * DOC: mem_info_gtt_total
*
- * Allocate and initialize the GTT manager.
+ * The amdgpu driver provides a sysfs API for reporting current total size of
+ * the GTT.
+ * The file mem_info_gtt_total is used for this, and returns the total size of
+ * the GTT block, in bytes
*/
-static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
- unsigned long p_size)
+static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- struct amdgpu_gtt_mgr *mgr;
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct ttm_resource_manager *man;
- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
- return -ENOMEM;
-
- drm_mm_init(&mgr->mm, 0, p_size);
- spin_lock_init(&mgr->lock);
- mgr->available = p_size;
- man->priv = mgr;
- return 0;
+ man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
+ return sysfs_emit(buf, "%llu\n", man->size);
}
/**
- * amdgpu_gtt_mgr_fini - free and destroy GTT manager
- *
- * @man: TTM memory type manager
+ * DOC: mem_info_gtt_used
*
- * Destroy and free the GTT manager, returns -EBUSY if ranges are still
- * allocated inside it.
+ * The amdgpu driver provides a sysfs API for reporting current total amount of
+ * used GTT.
+ * The file mem_info_gtt_used is used for this, and returns the current used
+ * size of the GTT block, in bytes
*/
-static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
+static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- struct amdgpu_gtt_mgr *mgr = man->priv;
-
- spin_lock(&mgr->lock);
- if (!drm_mm_clean(&mgr->mm)) {
- spin_unlock(&mgr->lock);
- return -EBUSY;
- }
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct ttm_resource_manager *man = &adev->mman.gtt_mgr.manager;
- drm_mm_takedown(&mgr->mm);
- spin_unlock(&mgr->lock);
- kfree(mgr);
- man->priv = NULL;
- return 0;
+ return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
}
+static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
+ amdgpu_mem_info_gtt_total_show, NULL);
+static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO,
+ amdgpu_mem_info_gtt_used_show, NULL);
+
+static struct attribute *amdgpu_gtt_mgr_attributes[] = {
+ &dev_attr_mem_info_gtt_total.attr,
+ &dev_attr_mem_info_gtt_used.attr,
+ NULL
+};
+
+const struct attribute_group amdgpu_gtt_mgr_attr_group = {
+ .attrs = amdgpu_gtt_mgr_attributes
+};
+
/**
- * amdgpu_gtt_mgr_alloc - allocate new ranges
+ * amdgpu_gtt_mgr_has_gart_addr - Check if mem has address space
*
- * @man: TTM memory type manager
- * @tbo: TTM BO we need this range for
- * @place: placement flags and restrictions
- * @mem: the resulting mem object
+ * @res: the mem object to check
*
- * Allocate the address space for a node.
+ * Check if a mem object has already address space allocated.
*/
-int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
- struct ttm_buffer_object *tbo,
- const struct ttm_place *place,
- struct ttm_mem_reg *mem)
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
{
- struct amdgpu_gtt_mgr *mgr = man->priv;
- struct drm_mm_node *node = mem->mm_node;
- enum drm_mm_insert_mode mode;
- unsigned long fpfn, lpfn;
- int r;
-
- if (node->start != AMDGPU_BO_INVALID_OFFSET)
- return 0;
-
- if (place)
- fpfn = place->fpfn;
- else
- fpfn = 0;
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
- if (place && place->lpfn)
- lpfn = place->lpfn;
- else
- lpfn = man->size;
-
- mode = DRM_MM_INSERT_BEST;
- if (place && place->flags & TTM_PL_FLAG_TOPDOWN)
- mode = DRM_MM_INSERT_HIGH;
-
- spin_lock(&mgr->lock);
- r = drm_mm_insert_node_in_range(&mgr->mm, node,
- mem->num_pages, mem->page_alignment, 0,
- fpfn, lpfn, mode);
- spin_unlock(&mgr->lock);
-
- if (!r) {
- mem->start = node->start;
- if (&tbo->mem == mem)
- tbo->offset = (tbo->mem.start << PAGE_SHIFT) +
- tbo->bdev->man[tbo->mem.mem_type].gpu_offset;
- }
-
- return r;
+ return drm_mm_node_allocated(&node->mm_nodes[0]);
}
-void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager *man)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
- struct amdgpu_gtt_mgr *mgr = man->priv;
-
- seq_printf(m, "man size:%llu pages, gtt available:%llu pages, usage:%lluMB\n",
- man->size, mgr->available, (u64)atomic64_read(&adev->gtt_usage) >> 20);
-
-}
/**
* amdgpu_gtt_mgr_new - allocate a new node
*
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
- * @mem: the resulting mem object
+ * @res: the resulting mem object
*
* Dummy, allocate the node but no space for it yet.
*/
-static int amdgpu_gtt_mgr_new(struct ttm_mem_type_manager *man,
+static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_mem_reg *mem)
+ struct ttm_resource **res)
{
- struct amdgpu_gtt_mgr *mgr = man->priv;
- struct drm_mm_node *node;
+ struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+ uint32_t num_pages = PFN_UP(tbo->base.size);
+ struct ttm_range_mgr_node *node;
int r;
- spin_lock(&mgr->lock);
- if (mgr->available < mem->num_pages) {
- spin_unlock(&mgr->lock);
- return 0;
- }
- mgr->available -= mem->num_pages;
- spin_unlock(&mgr->lock);
+ node = kzalloc(struct_size(node, mm_nodes, 1), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node) {
- r = -ENOMEM;
- goto err_out;
+ ttm_resource_init(tbo, place, &node->base);
+ if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
+ ttm_resource_manager_usage(man) > man->size) {
+ r = -ENOSPC;
+ goto err_free;
}
- node->start = AMDGPU_BO_INVALID_OFFSET;
- node->size = mem->num_pages;
- mem->mm_node = node;
-
- if (place->fpfn || place->lpfn || place->flags & TTM_PL_FLAG_TOPDOWN) {
- r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem);
- if (unlikely(r)) {
- kfree(node);
- mem->mm_node = NULL;
- r = 0;
- goto err_out;
- }
+ if (place->lpfn) {
+ spin_lock(&mgr->lock);
+ r = drm_mm_insert_node_in_range(&mgr->mm, &node->mm_nodes[0],
+ num_pages, tbo->page_alignment,
+ 0, place->fpfn, place->lpfn,
+ DRM_MM_INSERT_BEST);
+ spin_unlock(&mgr->lock);
+ if (unlikely(r))
+ goto err_free;
+
+ node->base.start = node->mm_nodes[0].start;
} else {
- mem->start = node->start;
+ node->mm_nodes[0].start = 0;
+ node->mm_nodes[0].size = PFN_UP(node->base.size);
+ node->base.start = AMDGPU_BO_INVALID_OFFSET;
}
+ *res = &node->base;
return 0;
-err_out:
- spin_lock(&mgr->lock);
- mgr->available += mem->num_pages;
- spin_unlock(&mgr->lock);
+err_free:
+ ttm_resource_fini(man, &node->base);
+ kfree(node);
return r;
}
@@ -205,54 +161,162 @@ err_out:
* amdgpu_gtt_mgr_del - free ranges
*
* @man: TTM memory type manager
- * @tbo: TTM BO we need this range for
- * @place: placement flags and restrictions
- * @mem: TTM memory object
+ * @res: TTM memory object
*
* Free the allocated GTT again.
*/
-static void amdgpu_gtt_mgr_del(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
{
- struct amdgpu_gtt_mgr *mgr = man->priv;
- struct drm_mm_node *node = mem->mm_node;
-
- if (!node)
- return;
+ struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
+ struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
spin_lock(&mgr->lock);
- if (node->start != AMDGPU_BO_INVALID_OFFSET)
- drm_mm_remove_node(node);
- mgr->available += mem->num_pages;
+ if (drm_mm_node_allocated(&node->mm_nodes[0]))
+ drm_mm_remove_node(&node->mm_nodes[0]);
spin_unlock(&mgr->lock);
+ ttm_resource_fini(man, res);
kfree(node);
- mem->mm_node = NULL;
+}
+
+/**
+ * amdgpu_gtt_mgr_recover - re-init gart
+ *
+ * @mgr: amdgpu_gtt_mgr pointer
+ *
+ * Re-init the gart for each known BO in the GTT.
+ */
+void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
+{
+ struct ttm_range_mgr_node *node;
+ struct drm_mm_node *mm_node;
+ struct amdgpu_device *adev;
+
+ adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
+ spin_lock(&mgr->lock);
+ drm_mm_for_each_node(mm_node, &mgr->mm) {
+ node = container_of(mm_node, typeof(*node), mm_nodes[0]);
+ amdgpu_ttm_recover_gart(node->base.bo);
+ }
+ spin_unlock(&mgr->lock);
+}
+
+/**
+ * amdgpu_gtt_mgr_intersects - test for intersection
+ *
+ * @man: Our manager object
+ * @res: The resource to test
+ * @place: The place for the new allocation
+ * @size: The size of the new allocation
+ *
+ * Simplified intersection test, only interesting if we need GART or not.
+ */
+static bool amdgpu_gtt_mgr_intersects(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ return !place->lpfn || amdgpu_gtt_mgr_has_gart_addr(res);
+}
+
+/**
+ * amdgpu_gtt_mgr_compatible - test for compatibility
+ *
+ * @man: Our manager object
+ * @res: The resource to test
+ * @place: The place for the new allocation
+ * @size: The size of the new allocation
+ *
+ * Simplified compatibility test.
+ */
+static bool amdgpu_gtt_mgr_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ return !place->lpfn || amdgpu_gtt_mgr_has_gart_addr(res);
}
/**
* amdgpu_gtt_mgr_debug - dump VRAM table
*
* @man: TTM memory type manager
- * @prefix: text prefix
+ * @printer: DRM printer to use
*
* Dump the table content using printk.
*/
-static void amdgpu_gtt_mgr_debug(struct ttm_mem_type_manager *man,
- const char *prefix)
+static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man,
+ struct drm_printer *printer)
{
- struct amdgpu_gtt_mgr *mgr = man->priv;
- struct drm_printer p = drm_debug_printer(prefix);
+ struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
spin_lock(&mgr->lock);
- drm_mm_print(&mgr->mm, &p);
+ drm_mm_print(&mgr->mm, printer);
spin_unlock(&mgr->lock);
}
-const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func = {
- .init = amdgpu_gtt_mgr_init,
- .takedown = amdgpu_gtt_mgr_fini,
- .get_node = amdgpu_gtt_mgr_new,
- .put_node = amdgpu_gtt_mgr_del,
+static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = {
+ .alloc = amdgpu_gtt_mgr_new,
+ .free = amdgpu_gtt_mgr_del,
+ .intersects = amdgpu_gtt_mgr_intersects,
+ .compatible = amdgpu_gtt_mgr_compatible,
.debug = amdgpu_gtt_mgr_debug
};
+
+/**
+ * amdgpu_gtt_mgr_init - init GTT manager and DRM MM
+ *
+ * @adev: amdgpu_device pointer
+ * @gtt_size: maximum size of GTT
+ *
+ * Allocate and initialize the GTT manager.
+ */
+int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
+{
+ struct amdgpu_gtt_mgr *mgr = &adev->mman.gtt_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
+ uint64_t start, size;
+
+ man->use_tt = true;
+ man->func = &amdgpu_gtt_mgr_func;
+
+ ttm_resource_manager_init(man, &adev->mman.bdev, gtt_size);
+
+ start = AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GTT_NUM_TRANSFER_WINDOWS;
+ size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
+ drm_mm_init(&mgr->mm, start, size);
+ spin_lock_init(&mgr->lock);
+
+ ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager);
+ ttm_resource_manager_set_used(man, true);
+ return 0;
+}
+
+/**
+ * amdgpu_gtt_mgr_fini - free and destroy GTT manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Destroy and free the GTT manager. Bails out early, leaving the manager in
+ * place, if ranges are still allocated inside it and cannot be evicted.
+ */
+void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_gtt_mgr *mgr = &adev->mman.gtt_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
+ int ret;
+
+ ttm_resource_manager_set_used(man, false);
+
+ ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+ if (ret)
+ return;
+
+ spin_lock(&mgr->lock);
+ drm_mm_takedown(&mgr->mm);
+ spin_unlock(&mgr->lock);
+
+ ttm_resource_manager_cleanup(man);
+ ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, NULL);
+}
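
Taken together, the new manager plugs into TTM as follows: amdgpu_gtt_mgr_init() registers amdgpu_gtt_mgr_func for TTM_PL_TT and marks the manager used, allocations then flow through the .alloc/.free callbacks, and amdgpu_gtt_mgr_fini() evicts everything and detaches the manager again. A minimal sketch of the expected pairing, assuming a caller along the lines of the driver's TTM init/teardown path (error handling trimmed):

        /* Hedged sketch: lifecycle of the GTT manager. Only the two calls
         * shown here come from this patch; the surrounding functions are
         * placeholders.
         */
        static int example_ttm_init(struct amdgpu_device *adev, u64 gtt_size)
        {
                int r = amdgpu_gtt_mgr_init(adev, gtt_size);

                if (r)
                        return r;
                /* TTM now routes GTT placements through amdgpu_gtt_mgr_new()
                 * and amdgpu_gtt_mgr_del() via the func table. */
                return 0;
        }

        static void example_ttm_fini(struct amdgpu_device *adev)
        {
                /* Evicts remaining resources and detaches the manager. */
                amdgpu_gtt_mgr_fini(adev);
        }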
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
new file mode 100644
index 000000000000..6e02fb9ac2f6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include <uapi/linux/kfd_ioctl.h>
+
+int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_hdp_ras *ras;
+
+ if (!adev->hdp.ras)
+ return 0;
+
+ ras = adev->hdp.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register hdp ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "hdp");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__HDP;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->hdp.ras_if = &ras->ras_block.ras_comm;
+
+ /* hdp ras follows amdgpu_ras_block_late_init_default for late init */
+ return 0;
+}
+
+void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32((adev->rmmio_remap.reg_offset +
+ KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >>
+ 2,
+ 0);
+ if (adev->nbio.funcs->get_memsize)
+ adev->nbio.funcs->get_memsize(adev);
+ } else {
+ amdgpu_ring_emit_wreg(ring,
+ (adev->rmmio_remap.reg_offset +
+ KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >>
+ 2,
+ 0);
+ }
+}
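
The helper is written so that it can serve either as a ring-less fallback, poking the remapped HDP_MEM_FLUSH_CNTL register directly, or as an emit_wreg on the given ring. A hedged sketch of how an ASIC-specific HDP block might reuse it as its flush callback (the funcs-table instance shown here is illustrative, not part of the patch):

        /* Hedged sketch: wiring the generic flush into an amdgpu_hdp_funcs
         * table. "example_hdp_funcs" is a placeholder name.
         */
        static const struct amdgpu_hdp_funcs example_hdp_funcs = {
                .flush_hdp = amdgpu_hdp_generic_flush,
                /* .invalidate_hdp, .update_clock_gating,
                 * .get_clock_gating_state and .init_registers would be
                 * filled in per ASIC. */
        };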
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
new file mode 100644
index 000000000000..4cfd932b7e91
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_HDP_H__
+#define __AMDGPU_HDP_H__
+#include "amdgpu_ras.h"
+
+struct amdgpu_hdp_ras {
+ struct amdgpu_ras_block_object ras_block;
+};
+
+struct amdgpu_hdp_funcs {
+ void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+ void (*invalidate_hdp)(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+ void (*update_clock_gating)(struct amdgpu_device *adev, bool enable);
+ void (*get_clock_gating_state)(struct amdgpu_device *adev, u64 *flags);
+ void (*init_registers)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_hdp {
+ struct ras_common_if *ras_if;
+ const struct amdgpu_hdp_funcs *funcs;
+ struct amdgpu_hdp_ras *ras;
+};
+
+int amdgpu_hdp_ras_sw_init(struct amdgpu_device *adev);
+void amdgpu_hdp_generic_flush(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+#endif /* __AMDGPU_HDP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
new file mode 100644
index 000000000000..2c6a6b858112
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ */
+
+/**
+ * DOC: MMU Notifier
+ *
+ * For coherent userptr handling, an MMU notifier is registered to inform the
+ * driver about updates to the page tables of a process.
+ *
+ * When somebody tries to invalidate the page tables we block the update until
+ * all operations on the pages in question are completed, then those pages are
+ * marked as accessed and also dirty if it wasn't a read only access.
+ *
+ * New command submissions using the userptrs in question are delayed until all
+ * page table invalidations are completed and we once more see a coherent process
+ * address space.
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <drm/drm.h>
+
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_hmm.h"
+
+#define MAX_WALK_BYTE (2UL << 30)
+
+/**
+ * amdgpu_hmm_invalidate_gfx - callback to notify about mm change
+ *
+ * @mni: the range (mm) is about to update
+ * @range: details on the invalidation
+ * @cur_seq: Value to pass to mmu_interval_set_seq()
+ *
+ * Block for operations on BOs to finish and mark pages as accessed and
+ * potentially dirty.
+ */
+static bool amdgpu_hmm_invalidate_gfx(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ long r;
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ mutex_lock(&adev->notifier_lock);
+
+ mmu_interval_set_seq(mni, cur_seq);
+
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ mutex_unlock(&adev->notifier_lock);
+ if (r <= 0)
+ DRM_ERROR("(%ld) failed to wait for user bo\n", r);
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops amdgpu_hmm_gfx_ops = {
+ .invalidate = amdgpu_hmm_invalidate_gfx,
+};
+
+/**
+ * amdgpu_hmm_invalidate_hsa - callback to notify about mm change
+ *
+ * @mni: the range (mm) is about to update
+ * @range: details on the invalidation
+ * @cur_seq: Value to pass to mmu_interval_set_seq()
+ *
+ * We temporarily evict the BO attached to this range. This necessitates
+ * evicting all user-mode queues of the process.
+ */
+static bool amdgpu_hmm_invalidate_hsa(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ amdgpu_amdkfd_evict_userptr(mni, cur_seq, bo->kfd_bo);
+
+ return true;
+}
+
+static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
+ .invalidate = amdgpu_hmm_invalidate_hsa,
+};
+
+/**
+ * amdgpu_hmm_register - register a BO for notifier updates
+ *
+ * @bo: amdgpu buffer object
+ * @addr: userptr addr we should monitor
+ *
+ * Registers a mmu_notifier for the given BO at the specified address.
+ * Returns 0 on success, -ERRNO if anything goes wrong.
+ */
+int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
+{
+ int r;
+
+ if (bo->kfd_bo)
+ r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
+ addr, amdgpu_bo_size(bo),
+ &amdgpu_hmm_hsa_ops);
+ else
+ r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+ amdgpu_bo_size(bo),
+ &amdgpu_hmm_gfx_ops);
+ if (r)
+ /*
+ * Make sure amdgpu_hmm_unregister() doesn't call
+ * mmu_interval_notifier_remove() when the notifier isn't properly
+ * initialized.
+ */
+ bo->notifier.mm = NULL;
+
+ return r;
+}
+
+/**
+ * amdgpu_hmm_unregister - unregister a BO for notifier updates
+ *
+ * @bo: amdgpu buffer object
+ *
+ * Remove any registration of mmu notifier updates from the buffer object.
+ */
+void amdgpu_hmm_unregister(struct amdgpu_bo *bo)
+{
+ if (!bo->notifier.mm)
+ return;
+ mmu_interval_notifier_remove(&bo->notifier);
+ bo->notifier.mm = NULL;
+}
+
+int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
+ uint64_t start, uint64_t npages, bool readonly,
+ void *owner,
+ struct hmm_range **phmm_range)
+{
+ struct hmm_range *hmm_range;
+ unsigned long end;
+ unsigned long timeout;
+ unsigned long *pfns;
+ int r = 0;
+
+ hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
+ if (unlikely(!hmm_range))
+ return -ENOMEM;
+
+ pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+ if (unlikely(!pfns)) {
+ r = -ENOMEM;
+ goto out_free_range;
+ }
+
+ hmm_range->notifier = notifier;
+ hmm_range->default_flags = HMM_PFN_REQ_FAULT;
+ if (!readonly)
+ hmm_range->default_flags |= HMM_PFN_REQ_WRITE;
+ hmm_range->hmm_pfns = pfns;
+ hmm_range->start = start;
+ end = start + npages * PAGE_SIZE;
+ hmm_range->dev_private_owner = owner;
+
+ do {
+ hmm_range->end = min(hmm_range->start + MAX_WALK_BYTE, end);
+
+ pr_debug("hmm range: start = 0x%lx, end = 0x%lx",
+ hmm_range->start, hmm_range->end);
+
+ timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+
+retry:
+ hmm_range->notifier_seq = mmu_interval_read_begin(notifier);
+ r = hmm_range_fault(hmm_range);
+ if (unlikely(r)) {
+ if (r == -EBUSY && !time_after(jiffies, timeout))
+ goto retry;
+ goto out_free_pfns;
+ }
+
+ if (hmm_range->end == end)
+ break;
+ hmm_range->hmm_pfns += MAX_WALK_BYTE >> PAGE_SHIFT;
+ hmm_range->start = hmm_range->end;
+ } while (hmm_range->end < end);
+
+ hmm_range->start = start;
+ hmm_range->hmm_pfns = pfns;
+
+ *phmm_range = hmm_range;
+
+ return 0;
+
+out_free_pfns:
+ kvfree(pfns);
+out_free_range:
+ kfree(hmm_range);
+
+ if (r == -EBUSY)
+ r = -EAGAIN;
+ return r;
+}
+
+bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
+{
+ bool r;
+
+ r = mmu_interval_read_retry(hmm_range->notifier,
+ hmm_range->notifier_seq);
+ kvfree(hmm_range->hmm_pfns);
+ kfree(hmm_range);
+
+ return r;
+}
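
The contract of the two range helpers is sequence based: amdgpu_hmm_range_get_pages() faults the pages in chunks of MAX_WALK_BYTE under the current notifier sequence, and the caller must later ask amdgpu_hmm_range_get_pages_done() whether an invalidation raced with the walk and the whole thing has to be redone. A hedged sketch of that retry loop (locking and the actual consumption of the PFNs are omitted; the owner is passed as NULL purely for illustration):

        /* Hedged sketch of the caller-side retry pattern for the helpers above. */
        static int example_get_user_pages(struct mmu_interval_notifier *notifier,
                                          u64 start, u64 npages, bool readonly)
        {
                struct hmm_range *range;
                int r;

                do {
                        r = amdgpu_hmm_range_get_pages(notifier, start, npages,
                                                       readonly, NULL, &range);
                        if (r)
                                return r; /* -EBUSY is already mapped to -EAGAIN */

                        /* ... consume range->hmm_pfns under the proper locks ... */

                        /* Returns true if an invalidation raced; redo the walk. */
                } while (amdgpu_hmm_range_get_pages_done(range));

                return 0;
        }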
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
new file mode 100644
index 000000000000..953e1d06de20
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+#ifndef __AMDGPU_MN_H__
+#define __AMDGPU_MN_H__
+
+#include <linux/types.h>
+#include <linux/hmm.h>
+#include <linux/rwsem.h>
+#include <linux/workqueue.h>
+#include <linux/interval_tree.h>
+#include <linux/mmu_notifier.h>
+
+int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
+ uint64_t start, uint64_t npages, bool readonly,
+ void *owner,
+ struct hmm_range **phmm_range);
+bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
+
+#if defined(CONFIG_HMM_MIRROR)
+int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr);
+void amdgpu_hmm_unregister(struct amdgpu_bo *bo);
+#else
+static inline int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
+{
+ DRM_WARN_ONCE("HMM_MIRROR kernel config option is not enabled, "
+ "add CONFIG_ZONE_DEVICE=y in config file to fix this\n");
+ return -ENODEV;
+}
+static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index f2739995c335..9cb72f0c5277 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -23,9 +23,9 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <linux/export.h>
-#include <drm/drmP.h>
+#include <linux/pci.h>
+
#include <drm/drm_edid.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -39,7 +39,7 @@
static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap)
{
struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
@@ -81,7 +81,7 @@ static int amdgpu_i2c_pre_xfer(struct i2c_adapter *i2c_adap)
static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap)
{
struct amdgpu_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
@@ -100,7 +100,7 @@ static void amdgpu_i2c_post_xfer(struct i2c_adapter *i2c_adap)
static int amdgpu_i2c_get_clock(void *i2c_priv)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
@@ -115,7 +115,7 @@ static int amdgpu_i2c_get_clock(void *i2c_priv)
static int amdgpu_i2c_get_data(void *i2c_priv)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
@@ -129,7 +129,7 @@ static int amdgpu_i2c_get_data(void *i2c_priv)
static void amdgpu_i2c_set_clock(void *i2c_priv, int clock)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
@@ -142,7 +142,7 @@ static void amdgpu_i2c_set_clock(void *i2c_priv, int clock)
static void amdgpu_i2c_set_data(void *i2c_priv, int data)
{
struct amdgpu_i2c_chan *i2c = i2c_priv;
- struct amdgpu_device *adev = i2c->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(i2c->dev);
struct amdgpu_i2c_bus_rec *rec = &i2c->rec;
uint32_t val;
@@ -174,8 +174,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.class = I2C_CLASS_DDC;
- i2c->adapter.dev.parent = &dev->pdev->dev;
+ i2c->adapter.dev.parent = dev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
mutex_init(&i2c->mutex);
@@ -185,7 +184,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"AMDGPU i2c hw bus %s", name);
i2c->adapter.algo = &amdgpu_atombios_i2c_algo;
- ret = i2c_add_adapter(&i2c->adapter);
+ ret = devm_i2c_add_adapter(dev->dev, &i2c->adapter);
if (ret)
goto out_free;
} else {
@@ -216,22 +215,23 @@ out_free:
}
-void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c)
-{
- if (!i2c)
- return;
- WARN_ON(i2c->has_aux);
- i2c_del_adapter(&i2c->adapter);
- kfree(i2c);
-}
-
-/* Add the default buses */
void amdgpu_i2c_init(struct amdgpu_device *adev)
{
- if (amdgpu_hw_i2c)
- DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
-
- amdgpu_atombios_i2c_init(adev);
+ if (!adev->is_atom_fw) {
+ if (!amdgpu_device_has_dc_support(adev)) {
+ amdgpu_atombios_i2c_init(adev);
+ } else {
+ switch (adev->asic_type) {
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ amdgpu_atombios_oem_i2c_init(adev, 0x97);
+ break;
+ default:
+ break;
+ }
+ }
+ }
}
/* remove all the buses */
@@ -239,28 +239,9 @@ void amdgpu_i2c_fini(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
- if (adev->i2c_bus[i]) {
- amdgpu_i2c_destroy(adev->i2c_bus[i]);
+ for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++)
+ if (adev->i2c_bus[i])
adev->i2c_bus[i] = NULL;
- }
- }
-}
-
-/* Add additional buses */
-void amdgpu_i2c_add(struct amdgpu_device *adev,
- const struct amdgpu_i2c_bus_rec *rec,
- const char *name)
-{
- struct drm_device *dev = adev->ddev;
- int i;
-
- for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) {
- if (!adev->i2c_bus[i]) {
- adev->i2c_bus[i] = amdgpu_i2c_create(dev, rec, name);
- return;
- }
- }
}
/* looks up bus based on id */
@@ -279,7 +260,7 @@ amdgpu_i2c_lookup(struct amdgpu_device *adev,
return NULL;
}
-static void amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus,
+static int amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
u8 *val)
@@ -304,16 +285,18 @@ static void amdgpu_i2c_get_byte(struct amdgpu_i2c_chan *i2c_bus,
out_buf[0] = addr;
out_buf[1] = 0;
- if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
- *val = in_buf[0];
- DRM_DEBUG("val = 0x%02x\n", *val);
- } else {
- DRM_DEBUG("i2c 0x%02x 0x%02x read failed\n",
- addr, *val);
+ if (i2c_transfer(&i2c_bus->adapter, msgs, 2) != 2) {
+ DRM_DEBUG("i2c 0x%02x read failed\n", addr);
+ return -EIO;
}
+
+ *val = in_buf[0];
+ DRM_DEBUG("val = 0x%02x\n", *val);
+
+ return 0;
}
-static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
+static int amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
u8 val)
@@ -329,16 +312,19 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
out_buf[0] = addr;
out_buf[1] = val;
- if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
- DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n",
- addr, val);
+ if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1) {
+ DRM_DEBUG("i2c 0x%02x 0x%02x write failed\n", addr, val);
+ return -EIO;
+ }
+
+ return 0;
}
/* ddc router switching */
void
amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
{
- u8 val;
+ u8 val = 0;
if (!amdgpu_connector->router.ddc_valid)
return;
@@ -346,16 +332,18 @@ amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connecto
if (!amdgpu_connector->router_bus)
return;
- amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+ if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
- 0x3, &val);
+ 0x3, &val))
+ return;
val &= ~amdgpu_connector->router.ddc_mux_control_pin;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
0x3, val);
- amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+ if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
- 0x1, &val);
+ 0x1, &val))
+ return;
val &= ~amdgpu_connector->router.ddc_mux_control_pin;
val |= amdgpu_connector->router.ddc_mux_state;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
@@ -375,16 +363,18 @@ amdgpu_i2c_router_select_cd_port(const struct amdgpu_connector *amdgpu_connector
if (!amdgpu_connector->router_bus)
return;
- amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+ if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
- 0x3, &val);
+ 0x3, &val))
+ return;
val &= ~amdgpu_connector->router.cd_mux_control_pin;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
0x3, val);
- amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
+ if (amdgpu_i2c_get_byte(amdgpu_connector->router_bus,
amdgpu_connector->router.i2c_addr,
- 0x1, &val);
+ 0x1, &val))
+ return;
val &= ~amdgpu_connector->router.cd_mux_control_pin;
val |= amdgpu_connector->router.cd_mux_state;
amdgpu_i2c_put_byte(amdgpu_connector->router_bus,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
index 63c2ff7499e1..1d3d3806e0dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.h
@@ -30,9 +30,6 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c);
void amdgpu_i2c_init(struct amdgpu_device *adev);
void amdgpu_i2c_fini(struct amdgpu_device *adev);
-void amdgpu_i2c_add(struct amdgpu_device *adev,
- const struct amdgpu_i2c_bus_rec *rec,
- const char *name);
struct amdgpu_i2c_chan *
amdgpu_i2c_lookup(struct amdgpu_device *adev,
const struct amdgpu_i2c_bus_rec *i2c_bus);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 6e4ae0d983c2..7d9bcb72e8dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -28,12 +28,15 @@
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
+
#include "amdgpu.h"
#include "atom.h"
+#include "amdgpu_trace.h"
#define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000)
+#define AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT msecs_to_jiffies(2000)
/*
* IB
@@ -44,13 +47,14 @@
 * produce command buffers which are sent to the kernel and
* put in IBs for execution by the requested ring.
*/
-static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
/**
* amdgpu_ib_get - request an IB (Indirect Buffer)
*
- * @ring: ring index the IB is associated with
+ * @adev: amdgpu_device pointer
+ * @vm: amdgpu_vm pointer
* @size: requested IB size
+ * @pool_type: IB pool type (delayed, immediate, direct)
* @ib: IB object returned
*
* Request an IB (all asics). IBs are allocated using the
@@ -58,19 +62,22 @@ static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
* Returns 0 on success, error on failure.
*/
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- unsigned size, struct amdgpu_ib *ib)
+ unsigned int size, enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_ib *ib)
{
int r;
if (size) {
- r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
- &ib->sa_bo, size, 256);
+ r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
+ &ib->sa_bo, size);
if (r) {
dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
return r;
}
ib->ptr = amdgpu_sa_bo_cpu_addr(ib->sa_bo);
+ /* flush the cache before commit the IB */
+ ib->flags = AMDGPU_IB_FLAG_EMIT_MEM_SYNC;
if (!vm)
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
@@ -82,24 +89,23 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
/**
* amdgpu_ib_free - free an IB (Indirect Buffer)
*
- * @adev: amdgpu_device pointer
* @ib: IB object to free
 * @f: the fence the SA bo needs to wait on for the IB allocation
*
* Free an IB (all asics).
*/
-void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
- struct dma_fence *f)
+void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f)
{
- amdgpu_sa_bo_free(adev, &ib->sa_bo, f);
+ amdgpu_sa_bo_free(&ib->sa_bo, f);
}
/**
* amdgpu_ib_schedule - schedule an IB (Indirect Buffer) on the ring
*
- * @adev: amdgpu_device pointer
+ * @ring: ring the IBs are scheduled on
* @num_ibs: number of IBs to schedule
* @ibs: IB objects to schedule
+ * @job: job to schedule
* @f: fence created during this submission
*
* Schedule an IB on the associated ring (all asics).
@@ -115,19 +121,25 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
* a CONST_IB), it will be put on the ring prior to the DE IB. Prior
* to SI there was just a DE IB.
*/
-int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs,
struct amdgpu_ib *ibs, struct amdgpu_job *job,
struct dma_fence **f)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
- bool skip_preamble, need_ctx_switch;
- unsigned patch_offset = ~0;
+ struct dma_fence *tmp = NULL;
+ struct amdgpu_fence *af;
+ bool need_ctx_switch;
struct amdgpu_vm *vm;
uint64_t fence_ctx;
uint32_t status = 0, alloc_size;
-
- unsigned i;
+ unsigned int fence_flags = 0;
+ bool secure, init_shadow;
+ u64 shadow_va, csa_va, gds_va;
+ int vmid = AMDGPU_JOB_GET_VMID(job);
+ bool need_pipe_sync = false;
+ unsigned int cond_exec;
+ unsigned int i;
int r = 0;
if (num_ibs == 0)
@@ -136,22 +148,44 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* ring tests don't use a job */
if (job) {
vm = job->vm;
- fence_ctx = job->fence_ctx;
+ fence_ctx = job->base.s_fence ?
+ job->base.s_fence->scheduled.context : 0;
+ shadow_va = job->shadow_va;
+ csa_va = job->csa_va;
+ gds_va = job->gds_va;
+ init_shadow = job->init_shadow;
+ af = &job->hw_fence;
+ /* Save the context of the job for reset handling.
+ * The driver needs this so it can skip the ring
+ * contents for guilty contexts.
+ */
+ af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
} else {
vm = NULL;
fence_ctx = 0;
+ shadow_va = 0;
+ csa_va = 0;
+ gds_va = 0;
+ init_shadow = false;
+ af = NULL;
}
- if (!ring->ready) {
+ if (!ring->sched.ready) {
dev_err(adev->dev, "couldn't schedule ib on ring <%s>\n", ring->name);
return -EINVAL;
}
- if (vm && !job->vm_id) {
+ if (vm && !job->vmid) {
dev_err(adev->dev, "VM IB without ID\n");
return -EINVAL;
}
+ if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
+ (!ring->funcs->secure_submission_supported)) {
+ dev_err(adev->dev, "secure submissions not supported on ring <%s>\n", ring->name);
+ return -EINVAL;
+ }
+
alloc_size = ring->funcs->emit_frame_size + num_ibs *
ring->funcs->emit_ib_size;
@@ -160,67 +194,106 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
dev_err(adev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
- if (ring->funcs->emit_pipeline_sync && job && job->need_pipeline_sync)
- amdgpu_ring_emit_pipeline_sync(ring);
- if (vm) {
- r = amdgpu_vm_flush(ring, job);
+ need_ctx_switch = ring->current_ctx != fence_ctx;
+ if (ring->funcs->emit_pipeline_sync && job &&
+ ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
+ need_ctx_switch || amdgpu_vm_need_pipeline_sync(ring, job))) {
+
+ need_pipe_sync = true;
+
+ if (tmp)
+ trace_amdgpu_ib_pipe_sync(job, tmp);
+
+ dma_fence_put(tmp);
+ }
+
+ if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
+ ring->funcs->emit_mem_sync(ring);
+
+ if (ring->funcs->emit_wave_limit &&
+ ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
+ ring->funcs->emit_wave_limit(ring, true);
+
+ if (ring->funcs->insert_start)
+ ring->funcs->insert_start(ring);
+
+ if (job) {
+ r = amdgpu_vm_flush(ring, job, need_pipe_sync);
if (r) {
amdgpu_ring_undo(ring);
return r;
}
}
+ amdgpu_ring_ib_begin(ring);
+
+ if (ring->funcs->emit_gfx_shadow)
+ amdgpu_ring_emit_gfx_shadow(ring, shadow_va, csa_va, gds_va,
+ init_shadow, vmid);
+
if (ring->funcs->init_cond_exec)
- patch_offset = amdgpu_ring_init_cond_exec(ring);
+ cond_exec = amdgpu_ring_init_cond_exec(ring,
+ ring->cond_exe_gpu_addr);
- if (ring->funcs->emit_hdp_flush
-#ifdef CONFIG_X86_64
- && !(adev->flags & AMD_IS_APU)
-#endif
- )
- amdgpu_ring_emit_hdp_flush(ring);
+ amdgpu_device_flush_hdp(adev, ring);
+
+ if (need_ctx_switch)
+ status |= AMDGPU_HAVE_CTX_SWITCH;
- skip_preamble = ring->current_ctx == fence_ctx;
- need_ctx_switch = ring->current_ctx != fence_ctx;
if (job && ring->funcs->emit_cntxcntl) {
- if (need_ctx_switch)
- status |= AMDGPU_HAVE_CTX_SWITCH;
status |= job->preamble_status;
-
- if (vm)
- status |= AMDGPU_VM_DOMAIN;
+ status |= job->preemption_status;
amdgpu_ring_emit_cntxcntl(ring, status);
}
+ /* Set up the initial TMZ (secure submission) state and send it off. */
+ secure = false;
+ if (job && ring->funcs->emit_frame_cntl) {
+ secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
+ amdgpu_ring_emit_frame_cntl(ring, true, secure);
+ }
+
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
- /* drop preamble IBs if we don't have a context switch */
- if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
- skip_preamble &&
- !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
- !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
- continue;
+ if (job && ring->funcs->emit_frame_cntl) {
+ if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
+ amdgpu_ring_emit_frame_cntl(ring, false, secure);
+ secure = !secure;
+ amdgpu_ring_emit_frame_cntl(ring, true, secure);
+ }
+ }
- amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
- need_ctx_switch);
- need_ctx_switch = false;
+ amdgpu_ring_emit_ib(ring, job, ib, status);
+ status &= ~AMDGPU_HAVE_CTX_SWITCH;
}
- if (ring->funcs->emit_hdp_invalidate
-#ifdef CONFIG_X86_64
- && !(adev->flags & AMD_IS_APU)
-#endif
- )
- amdgpu_ring_emit_hdp_invalidate(ring);
+ if (job && ring->funcs->emit_frame_cntl)
+ amdgpu_ring_emit_frame_cntl(ring, false, secure);
+
+ amdgpu_device_invalidate_hdp(adev, ring);
+
+ if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
+ fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
+
+ /* wrap the last IB with fence */
+ if (job && job->uf_addr) {
+ amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
+ fence_flags | AMDGPU_FENCE_FLAG_64BIT);
+ }
+
+ if (ring->funcs->emit_gfx_shadow && ring->funcs->init_cond_exec) {
+ amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0);
+ amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr);
+ }
- r = amdgpu_fence_emit(ring, f);
+ r = amdgpu_fence_emit(ring, f, af, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
- if (job && job->vm_id)
- amdgpu_vm_reset_id(adev, ring->funcs->vmhub,
- job->vm_id);
+ if (job && job->vmid)
+ amdgpu_vmid_reset(adev, ring->vm_hub, job->vmid);
amdgpu_ring_undo(ring);
return r;
}
@@ -228,19 +301,27 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->insert_end)
ring->funcs->insert_end(ring);
- /* wrap the last IB with fence */
- if (job && job->uf_addr) {
- amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
- AMDGPU_FENCE_FLAG_64BIT);
- }
-
- if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
- amdgpu_ring_patch_cond_exec(ring, patch_offset);
+ amdgpu_ring_patch_cond_exec(ring, cond_exec);
ring->current_ctx = fence_ctx;
- if (vm && ring->funcs->emit_switch_buffer)
+ if (job && ring->funcs->emit_switch_buffer)
amdgpu_ring_emit_switch_buffer(ring);
+
+ if (ring->funcs->emit_wave_limit &&
+ ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
+ ring->funcs->emit_wave_limit(ring, false);
+
+ /* Save the wptr associated with this fence. This must be done last
+ * for resets to work properly, as the saved wptr tells us which
+ * ring contents to back up after the queue is reset.
+ */
+ amdgpu_fence_save_wptr(*f);
+
+ amdgpu_ring_ib_end(ring);
amdgpu_ring_commit(ring);
+
return 0;
}
@@ -255,29 +336,26 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
*/
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
- int r;
+ int r, i;
- if (adev->ib_pool_ready) {
+ if (adev->ib_pool_ready)
return 0;
- }
- r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
- AMDGPU_IB_POOL_SIZE*64*1024,
- AMDGPU_GPU_PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT);
- if (r) {
- return r;
- }
- r = amdgpu_sa_bo_manager_start(adev, &adev->ring_tmp_bo);
- if (r) {
- return r;
+ for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
+ r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
+ AMDGPU_IB_POOL_SIZE, 256,
+ AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ goto error;
}
-
adev->ib_pool_ready = true;
- if (amdgpu_debugfs_sa_init(adev)) {
- dev_err(adev->dev, "failed to register debugfs file for SA\n");
- }
+
return 0;
+
+error:
+ while (i--)
+ amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+ return r;
}
/**
@@ -290,11 +368,14 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
*/
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
- if (adev->ib_pool_ready) {
- amdgpu_sa_bo_manager_suspend(adev, &adev->ring_tmp_bo);
- amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
- adev->ib_pool_ready = false;
- }
+ int i;
+
+ if (!adev->ib_pool_ready)
+ return;
+
+ for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
+ amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+ adev->ib_pool_ready = false;
}
/**
@@ -309,30 +390,74 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
*/
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
- unsigned i;
+ long tmo_gfx, tmo_mm;
int r, ret = 0;
+ unsigned int i;
+
+ tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
+ if (amdgpu_sriov_vf(adev)) {
+ /* The MM engines on the hypervisor side are not scheduled together
+ * with the CP and SDMA engines, so even in exclusive mode an MM
+ * engine could still be running on another VF. The IB test timeout
+ * for MM engines under SR-IOV therefore has to be long; 8 seconds
+ * should be enough for the MM engine to come back to this VF.
+ */
+ tmo_mm = 8 * AMDGPU_IB_TEST_TIMEOUT;
+ }
+
+ if (amdgpu_sriov_runtime(adev)) {
+ /* The CP and SDMA engines are scheduled together, so the timeout
+ * needs to be wide enough to cover the time spent waiting for
+ * them to come back under RUNTIME only.
+ */
+ tmo_gfx = 8 * AMDGPU_IB_TEST_TIMEOUT;
+ } else if (adev->gmc.xgmi.hive_id) {
+ tmo_gfx = AMDGPU_IB_TEST_GFX_XGMI_TIMEOUT;
+ }
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ for (i = 0; i < adev->num_rings; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
+ long tmo;
- if (!ring || !ring->ready)
+ /* KIQ rings don't have an IB test because we never submit IBs
+ * to them and they have no interrupt support.
+ */
+ if (!ring->sched.ready || !ring->funcs->test_ib)
continue;
- r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT);
- if (r) {
- ring->ready = false;
-
- if (ring == &adev->gfx.gfx_ring[0]) {
- /* oh, oh, that's really bad */
- DRM_ERROR("amdgpu: failed testing IB on GFX ring (%d).\n", r);
- adev->accel_working = false;
- return r;
-
- } else {
- /* still not good, but we can live with it */
- DRM_ERROR("amdgpu: failed testing IB on ring %d (%d).\n", i, r);
- ret = r;
- }
+ if (adev->enable_mes &&
+ ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ continue;
+
+ /* MM engine need more time */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
+ ring->funcs->type == AMDGPU_RING_TYPE_UVD_ENC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_DEC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG)
+ tmo = tmo_mm;
+ else
+ tmo = tmo_gfx;
+
+ r = amdgpu_ring_test_ib(ring, tmo);
+ if (!r) {
+ DRM_DEV_DEBUG(adev->dev, "ib test on %s succeeded\n",
+ ring->name);
+ continue;
+ }
+
+ ring->sched.ready = false;
+ DRM_DEV_ERROR(adev->dev, "IB test failed on %s (%d).\n",
+ ring->name, r);
+
+ if (ring == &adev->gfx.gfx_ring[0]) {
+ /* oh, oh, that's really bad */
+ adev->accel_working = false;
+ return r;
+
+ } else {
+ ret = r;
}
}
return ret;
@@ -343,29 +468,34 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
*/
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
+static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = m->private;
- amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);
+ seq_puts(m, "--------------------- DELAYED ---------------------\n");
+ amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
+ m);
+ seq_puts(m, "-------------------- IMMEDIATE --------------------\n");
+ amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
+ m);
+ seq_puts(m, "--------------------- DIRECT ----------------------\n");
+ amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);
return 0;
-
}
-static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
- {"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
-};
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_sa_info);
#endif
-static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
+void amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
-#else
- return 0;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("amdgpu_sa_info", 0444, root, adev,
+ &amdgpu_debugfs_sa_info_fops);
+
#endif
}
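
With the typed pools in place, the round trip through this file is: allocate an IB from one of the pools, fill it with packets, hand it to amdgpu_ib_schedule() and release it against the resulting fence, which is essentially what the per-ring IB tests do. A hedged sketch of that flow (packet contents are elided and the DIRECT pool is chosen only for illustration):

        /* Hedged sketch of the IB round trip using the reworked API. */
        static int example_ib_round_trip(struct amdgpu_ring *ring)
        {
                struct amdgpu_device *adev = ring->adev;
                struct dma_fence *f = NULL;
                struct amdgpu_ib ib;
                int r;

                memset(&ib, 0, sizeof(ib));
                r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
                if (r)
                        return r;

                /* ... write packets to ib.ptr and set ib.length_dw ... */

                r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
                if (!r && dma_fence_wait_timeout(f, false,
                                                 AMDGPU_IB_TEST_TIMEOUT) <= 0)
                        r = -ETIMEDOUT;

                amdgpu_ib_free(&ib, f);
                dma_fence_put(f);
                return r;
        }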
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
new file mode 100644
index 000000000000..3ef5bc95642c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -0,0 +1,642 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_ids.h"
+
+#include <linux/idr.h>
+#include <linux/dma-fence-array.h>
+
+
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+
+/*
+ * PASID manager
+ *
+ * PASIDs are global address space identifiers that can be shared
+ * between the GPU, an IOMMU and the driver. VMs on different devices
+ * may use the same PASID if they share the same address
+ * space. Therefore PASIDs are allocated using a global IDA. VMs are
+ * looked up from the PASID per amdgpu_device.
+ */
+static DEFINE_IDA(amdgpu_pasid_ida);
+
+/* Helper to free pasid from a fence callback */
+struct amdgpu_pasid_cb {
+ struct dma_fence_cb cb;
+ u32 pasid;
+};
+
+/**
+ * amdgpu_pasid_alloc - Allocate a PASID
+ * @bits: Maximum width of the PASID in bits, must be at least 1
+ *
+ * Allocates a PASID of the given width while keeping smaller PASIDs
+ * available if possible.
+ *
+ * Returns a positive integer on success. Returns %-EINVAL if bits==0.
+ * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
+ * memory allocation failure.
+ */
+int amdgpu_pasid_alloc(unsigned int bits)
+{
+ int pasid = -EINVAL;
+
+ for (bits = min(bits, 31U); bits > 0; bits--) {
+ pasid = ida_alloc_range(&amdgpu_pasid_ida, 1U << (bits - 1),
+ (1U << bits) - 1, GFP_KERNEL);
+ if (pasid != -ENOSPC)
+ break;
+ }
+
+ if (pasid >= 0)
+ trace_amdgpu_pasid_allocated(pasid);
+
+ return pasid;
+}
+
+/**
+ * amdgpu_pasid_free - Free a PASID
+ * @pasid: PASID to free
+ */
+void amdgpu_pasid_free(u32 pasid)
+{
+ trace_amdgpu_pasid_freed(pasid);
+ ida_free(&amdgpu_pasid_ida, pasid);
+}
+
+static void amdgpu_pasid_free_cb(struct dma_fence *fence,
+ struct dma_fence_cb *_cb)
+{
+ struct amdgpu_pasid_cb *cb =
+ container_of(_cb, struct amdgpu_pasid_cb, cb);
+
+ amdgpu_pasid_free(cb->pasid);
+ dma_fence_put(fence);
+ kfree(cb);
+}
+
+/**
+ * amdgpu_pasid_free_delayed - free pasid when fences signal
+ *
+ * @resv: reservation object with the fences to wait for
+ * @pasid: pasid to free
+ *
+ * Free the pasid only after all the fences in resv are signaled.
+ */
+void amdgpu_pasid_free_delayed(struct dma_resv *resv,
+ u32 pasid)
+{
+ struct amdgpu_pasid_cb *cb;
+ struct dma_fence *fence;
+ int r;
+
+ r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
+ if (r)
+ goto fallback;
+
+ if (!fence) {
+ amdgpu_pasid_free(pasid);
+ return;
+ }
+
+ cb = kmalloc(sizeof(*cb), GFP_KERNEL);
+ if (!cb) {
+ /* Last resort when we are OOM */
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ amdgpu_pasid_free(pasid);
+ } else {
+ cb->pasid = pasid;
+ if (dma_fence_add_callback(fence, &cb->cb,
+ amdgpu_pasid_free_cb))
+ amdgpu_pasid_free_cb(fence, &cb->cb);
+ }
+
+ return;
+
+fallback:
+ /* Not enough memory for the delayed delete, as last resort
+ * block for all the fences to complete.
+ */
+ dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ amdgpu_pasid_free(pasid);
+}
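
Since PASIDs come out of a single global IDA, the usual pattern is to allocate one per address space and, when buffers may still be in flight, defer the free until the reservation object's fences have signalled rather than freeing immediately. A hedged sketch (the 16-bit width is an arbitrary example):

        /* Hedged sketch: PASID lifetime tied to a reservation object. */
        static int example_pasid_lifetime(struct dma_resv *resv)
        {
                int pasid = amdgpu_pasid_alloc(16);

                if (pasid < 0)
                        return pasid;

                /* ... bind the PASID to the VM / IOMMU and use it ... */

                /* Free only once all bookkeeping fences have signalled. */
                amdgpu_pasid_free_delayed(resv, pasid);
                return 0;
        }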
+
+/*
+ * VMID manager
+ *
+ * VMIDs are a per VMHUB identifier for page tables handling.
+ */
+
+/**
+ * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
+ *
+ * @adev: amdgpu_device pointer
+ * @id: VMID structure
+ *
+ * Check if GPU reset occurred since last use of the VMID.
+ */
+bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vmid *id)
+{
+ return id->current_gpu_reset_count !=
+ atomic_read(&adev->gpu_reset_counter);
+}
+
+/* Check if we need to switch to another set of resources */
+static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
+ struct amdgpu_job *job)
+{
+ return id->gds_base != job->gds_base ||
+ id->gds_size != job->gds_size ||
+ id->gws_base != job->gws_base ||
+ id->gws_size != job->gws_size ||
+ id->oa_base != job->oa_base ||
+ id->oa_size != job->oa_size;
+}
+
+/* Check if the id is compatible with the job */
+static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
+ struct amdgpu_job *job)
+{
+ return id->pd_gpu_addr == job->vm_pd_addr &&
+ !amdgpu_vmid_gds_switch_needed(id, job);
+}
+
+/**
+ * amdgpu_vmid_grab_idle - grab idle VMID
+ *
+ * @ring: ring we want to submit job to
+ * @idle: resulting idle VMID
+ * @fence: fence to wait for if no id could be grabbed
+ *
+ * Try to find an idle VMID; if none is idle, add a fence to wait on to the
+ * sync object. Returns -ENOMEM when we are out of memory.
+ */
+static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
+ struct amdgpu_vmid **idle,
+ struct dma_fence **fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->vm_hub;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct dma_fence **fences;
+ unsigned i;
+
+ if (!dma_fence_is_signaled(ring->vmid_wait)) {
+ *fence = dma_fence_get(ring->vmid_wait);
+ return 0;
+ }
+
+ fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_NOWAIT);
+ if (!fences)
+ return -ENOMEM;
+
+ /* Check if we have an idle VMID */
+ i = 0;
+ list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
+ /* Don't use per engine and per process VMID at the same time */
+ struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
+ NULL : ring;
+
+ fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
+ if (!fences[i])
+ break;
+ ++i;
+ }
+
+ /* If we can't find an idle VMID to use, wait till one becomes available */
+ if (&(*idle)->list == &id_mgr->ids_lru) {
+ u64 fence_context = adev->vm_manager.fence_context + ring->idx;
+ unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
+ struct dma_fence_array *array;
+ unsigned j;
+
+ *idle = NULL;
+ for (j = 0; j < i; ++j)
+ dma_fence_get(fences[j]);
+
+ array = dma_fence_array_create(i, fences, fence_context,
+ seqno, true);
+ if (!array) {
+ for (j = 0; j < i; ++j)
+ dma_fence_put(fences[j]);
+ kfree(fences);
+ return -ENOMEM;
+ }
+
+ *fence = dma_fence_get(&array->base);
+ dma_fence_put(ring->vmid_wait);
+ ring->vmid_wait = &array->base;
+ return 0;
+ }
+ kfree(fences);
+
+ return 0;
+}
+
+/**
+ * amdgpu_vmid_grab_reserved - try to assign reserved VMID
+ *
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @job: job who wants to use the VMID
+ * @id: resulting VMID
+ * @fence: fence to wait for if no id could be grabbed
+ *
+ * Try to assign a reserved VMID.
+ */
+static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
+ struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_vmid **id,
+ struct dma_fence **fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->vm_hub;
+ uint64_t fence_context = adev->fence_context + ring->idx;
+ bool needs_flush = vm->use_cpu_for_update;
+ uint64_t updates = amdgpu_vm_tlb_seq(vm);
+ int r;
+
+ *id = vm->reserved_vmid[vmhub];
+ if ((*id)->owner != vm->immediate.fence_context ||
+ !amdgpu_vmid_compatible(*id, job) ||
+ (*id)->flushed_updates < updates ||
+ !(*id)->last_flush ||
+ ((*id)->last_flush->context != fence_context &&
+ !dma_fence_is_signaled((*id)->last_flush)))
+ needs_flush = true;
+
+ if ((*id)->owner != vm->immediate.fence_context ||
+ (!adev->vm_manager.concurrent_flush && needs_flush)) {
+ struct dma_fence *tmp;
+
+ /* Don't use per engine and per process VMID at the
+ * same time
+ */
+ if (adev->vm_manager.concurrent_flush)
+ ring = NULL;
+
+ /* to prevent one context being starved by another context */
+ (*id)->pd_gpu_addr = 0;
+ tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
+ if (tmp) {
+ *id = NULL;
+ *fence = dma_fence_get(tmp);
+ return 0;
+ }
+ }
+
+ /* Good we can use this VMID. Remember this submission as
+ * user of the VMID.
+ */
+ r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished,
+ GFP_NOWAIT);
+ if (r)
+ return r;
+
+ job->vm_needs_flush = needs_flush;
+ job->spm_update_needed = true;
+ return 0;
+}
+
+/**
+ * amdgpu_vmid_grab_used - try to reuse a VMID
+ *
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @job: job who wants to use the VMID
+ * @id: resulting VMID
+ *
+ * Try to reuse a VMID for this submission.
+ */
+static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
+ struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_vmid **id)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->vm_hub;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ uint64_t fence_context = adev->fence_context + ring->idx;
+ uint64_t updates = amdgpu_vm_tlb_seq(vm);
+ int r;
+
+ job->vm_needs_flush = vm->use_cpu_for_update;
+
+ /* Check if we can use a VMID already assigned to this VM */
+ list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
+ bool needs_flush = vm->use_cpu_for_update;
+
+ /* Check all the prerequisites to using this VMID */
+ if ((*id)->owner != vm->immediate.fence_context)
+ continue;
+
+ if (!amdgpu_vmid_compatible(*id, job))
+ continue;
+
+ if (!(*id)->last_flush ||
+ ((*id)->last_flush->context != fence_context &&
+ !dma_fence_is_signaled((*id)->last_flush)))
+ needs_flush = true;
+
+ if ((*id)->flushed_updates < updates)
+ needs_flush = true;
+
+ if (needs_flush && !adev->vm_manager.concurrent_flush)
+ continue;
+
+ /* Good, we can use this VMID. Remember this submission as
+ * user of the VMID.
+ */
+ r = amdgpu_sync_fence(&(*id)->active,
+ &job->base.s_fence->finished,
+ GFP_NOWAIT);
+ if (r)
+ return r;
+
+ job->vm_needs_flush |= needs_flush;
+ return 0;
+ }
+
+ *id = NULL;
+ return 0;
+}
+
+/**
+ * amdgpu_vmid_grab - allocate the next free VMID
+ *
+ * @vm: vm to allocate id for
+ * @ring: ring we want to submit job to
+ * @job: job who wants to use the VMID
+ * @fence: fence to wait for if no id could be grabbed
+ *
+ * Allocate an id for the vm, adding fences to the sync obj as necessary.
+ */
+int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_job *job, struct dma_fence **fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ unsigned vmhub = ring->vm_hub;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *idle = NULL;
+ struct amdgpu_vmid *id = NULL;
+ int r = 0;
+
+ mutex_lock(&id_mgr->lock);
+ r = amdgpu_vmid_grab_idle(ring, &idle, fence);
+ if (r || !idle)
+ goto error;
+
+ if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
+ r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
+ if (r || !id)
+ goto error;
+ } else {
+ r = amdgpu_vmid_grab_used(vm, ring, job, &id);
+ if (r)
+ goto error;
+
+ if (!id) {
+ /* Still no ID to use? Then use the idle one found earlier */
+ id = idle;
+
+ /* Remember this submission as user of the VMID */
+ r = amdgpu_sync_fence(&id->active,
+ &job->base.s_fence->finished,
+ GFP_NOWAIT);
+ if (r)
+ goto error;
+
+ job->vm_needs_flush = true;
+ }
+
+ list_move_tail(&id->list, &id_mgr->ids_lru);
+ }
+
+ job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
+ if (job->vm_needs_flush) {
+ id->flushed_updates = amdgpu_vm_tlb_seq(vm);
+ dma_fence_put(id->last_flush);
+ id->last_flush = NULL;
+ }
+ job->vmid = id - id_mgr->ids;
+ job->pasid = vm->pasid;
+
+ id->gds_base = job->gds_base;
+ id->gds_size = job->gds_size;
+ id->gws_base = job->gws_base;
+ id->gws_size = job->gws_size;
+ id->oa_base = job->oa_base;
+ id->oa_size = job->oa_size;
+ id->pd_gpu_addr = job->vm_pd_addr;
+ id->owner = vm->immediate.fence_context;
+
+ trace_amdgpu_vm_grab_id(vm, ring, job);
+
+error:
+ mutex_unlock(&id_mgr->lock);
+ return r;
+}
+
+/*
+ * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
+ * @vm: the VM to check
+ * @vmhub: the VMHUB which will be used
+ *
+ * Returns: True if the VM will use a reserved VMID.
+ */
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
+{
+ return vm->reserved_vmid[vmhub];
+}
+
+/*
+ * amdgpu_vmid_alloc_reserved - reserve a specific VMID for this vm
+ * @adev: amdgpu device structure
+ * @vm: the VM to reserve an ID for
+ * @vmhub: the VMHUB which should be used
+ *
+ * Mostly used to have a reserved VMID for debugging and SPM.
+ *
+ * Returns: 0 for success, -ENOENT if an ID is already reserved.
+ */
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub)
+{
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id;
+ int r = 0;
+
+ mutex_lock(&id_mgr->lock);
+ if (vm->reserved_vmid[vmhub])
+ goto unlock;
+ if (id_mgr->reserved_vmid) {
+ r = -ENOENT;
+ goto unlock;
+ }
+ /* Remove from normal round robin handling */
+ id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid, list);
+ list_del_init(&id->list);
+ vm->reserved_vmid[vmhub] = id;
+ id_mgr->reserved_vmid = true;
+ mutex_unlock(&id_mgr->lock);
+
+ return 0;
+unlock:
+ mutex_unlock(&id_mgr->lock);
+ return r;
+}
+
+/*
+ * amdgpu_vmid_free_reserved - free up a reserved VMID again
+ * @adev: amdgpu device structure
+ * @vm: the VM with the reserved ID
+ * @vmhub: the VMHUB which should be used
+ */
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub)
+{
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+
+ mutex_lock(&id_mgr->lock);
+ if (vm->reserved_vmid[vmhub]) {
+ list_add(&vm->reserved_vmid[vmhub]->list,
+ &id_mgr->ids_lru);
+ vm->reserved_vmid[vmhub] = NULL;
+ id_mgr->reserved_vmid = false;
+ }
+ mutex_unlock(&id_mgr->lock);
+}
+
+/**
+ * amdgpu_vmid_reset - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ * @vmhub: vmhub type
+ * @vmid: vmid number to use
+ *
+ * Reset saved GDS, GWS and OA to force a switch on the next flush.
+ */
+void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
+ unsigned vmid)
+{
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id = &id_mgr->ids[vmid];
+
+ mutex_lock(&id_mgr->lock);
+ id->owner = 0;
+ id->gds_base = 0;
+ id->gds_size = 0;
+ id->gws_base = 0;
+ id->gws_size = 0;
+ id->oa_base = 0;
+ id->oa_size = 0;
+ mutex_unlock(&id_mgr->lock);
+}
+
+/**
+ * amdgpu_vmid_reset_all - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ *
+ * Reset VMID to force flush on next use
+ */
+void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vmid_mgr *id_mgr =
+ &adev->vm_manager.id_mgr[i];
+
+ for (j = 1; j < id_mgr->num_ids; ++j)
+ amdgpu_vmid_reset(adev, i, j);
+ }
+}
+
+/**
+ * amdgpu_vmid_mgr_init - init the VMID manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the VM manager structures
+ */
+void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vmid_mgr *id_mgr =
+ &adev->vm_manager.id_mgr[i];
+
+ mutex_init(&id_mgr->lock);
+ INIT_LIST_HEAD(&id_mgr->ids_lru);
+
+ /* for GC <10, SDMA uses MMHUB so use first_kfd_vmid for both GC and MM */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(10, 0, 0))
+ /* manage only VMIDs not used by KFD */
+ id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+ else if (AMDGPU_IS_MMHUB0(i) ||
+ AMDGPU_IS_MMHUB1(i))
+ id_mgr->num_ids = 16;
+ else
+ /* manage only VMIDs not used by KFD */
+ id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;
+
+ /* skip over VMID 0, since it is the system VM */
+ for (j = 1; j < id_mgr->num_ids; ++j) {
+ amdgpu_vmid_reset(adev, i, j);
+ amdgpu_sync_create(&id_mgr->ids[j].active);
+ list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
+ }
+ }
+}
+
+/**
+ * amdgpu_vmid_mgr_fini - cleanup the VMID manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Clean up the VMID manager and free resources.
+ */
+void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
+{
+ unsigned i, j;
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+ struct amdgpu_vmid_mgr *id_mgr =
+ &adev->vm_manager.id_mgr[i];
+
+ mutex_destroy(&id_mgr->lock);
+ for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
+ struct amdgpu_vmid *id = &id_mgr->ids[j];
+
+ amdgpu_sync_free(&id->active);
+ dma_fence_put(id->last_flush);
+ dma_fence_put(id->pasid_mapping);
+ }
+ }
+}
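
As a hedged illustration of how the reservation pair above is meant to be used, a caller that needs a stable VMID (for example while SPM or a debugger is active) would bracket its work with the alloc/free calls; everything here except amdgpu_vmid_alloc_reserved() and amdgpu_vmid_free_reserved() is a placeholder, not code from this patch:

/* Hypothetical caller: hold a reserved VMID on @vmhub while profiling runs. */
static int example_profiling_start(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm, unsigned int vmhub)
{
	int r;

	r = amdgpu_vmid_alloc_reserved(adev, vm, vmhub);
	if (r)
		return r;	/* -ENOENT if the hub's reserved VMID is taken */

	/* ... program the profiler with the now-stable VMID ... */
	return 0;
}

static void example_profiling_stop(struct amdgpu_device *adev,
				   struct amdgpu_vm *vm, unsigned int vmhub)
{
	amdgpu_vmid_free_reserved(adev, vm, vmhub);
}
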
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
new file mode 100644
index 000000000000..b3649cd3af56
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_IDS_H__
+#define __AMDGPU_IDS_H__
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/dma-fence.h>
+
+#include "amdgpu_sync.h"
+
+/* maximum number of VMIDs */
+#define AMDGPU_NUM_VMID 16
+
+struct amdgpu_device;
+struct amdgpu_vm;
+struct amdgpu_ring;
+struct amdgpu_sync;
+struct amdgpu_job;
+
+struct amdgpu_vmid {
+ struct list_head list;
+ struct amdgpu_sync active;
+ struct dma_fence *last_flush;
+ uint64_t owner;
+
+ uint64_t pd_gpu_addr;
+ /* last flushed PD/PT update */
+ uint64_t flushed_updates;
+
+ uint32_t current_gpu_reset_count;
+
+ uint32_t gds_base;
+ uint32_t gds_size;
+ uint32_t gws_base;
+ uint32_t gws_size;
+ uint32_t oa_base;
+ uint32_t oa_size;
+
+ unsigned pasid;
+ struct dma_fence *pasid_mapping;
+};
+
+struct amdgpu_vmid_mgr {
+ struct mutex lock;
+ unsigned num_ids;
+ struct list_head ids_lru;
+ struct amdgpu_vmid ids[AMDGPU_NUM_VMID];
+ bool reserved_vmid;
+};
+
+int amdgpu_pasid_alloc(unsigned int bits);
+void amdgpu_pasid_free(u32 pasid);
+void amdgpu_pasid_free_delayed(struct dma_resv *resv,
+ u32 pasid);
+
+bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vmid *id);
+bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
+int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub);
+void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned vmhub);
+int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+ struct amdgpu_job *job, struct dma_fence **fence);
+void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
+ unsigned vmid);
+void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
+
+void amdgpu_vmid_mgr_init(struct amdgpu_device *adev);
+void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev);
+
+#endif
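
The PASID helpers declared above hand out process address space IDs from a shared pool; a minimal usage sketch, assuming a 16-bit PASID width (the width and the example_* wrapper names are illustrative only):

/* Hypothetical wrappers around the PASID allocator declared above. */
static int example_acquire_pasid(u32 *out_pasid)
{
	int pasid;

	pasid = amdgpu_pasid_alloc(16);	/* request an ID that fits in 16 bits */
	if (pasid < 0)
		return pasid;

	*out_pasid = pasid;
	return 0;
}

static void example_release_pasid(u32 pasid)
{
	amdgpu_pasid_free(pasid);
}
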
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 3ab4c65ecc8b..a6419246e9c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -21,49 +21,26 @@
*
*/
-#include <drm/drmP.h>
+#include <linux/dma-mapping.h>
+
#include "amdgpu.h"
#include "amdgpu_ih.h"
-#include "amdgpu_amdkfd.h"
-
-/**
- * amdgpu_ih_ring_alloc - allocate memory for the IH ring
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate a ring buffer for the interrupt controller.
- * Returns 0 for success, errors for failure.
- */
-static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
-{
- int r;
-
- /* Allocate ring buffer */
- if (adev->irq.ih.ring_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
- &adev->irq.ih.ring_obj,
- &adev->irq.ih.gpu_addr,
- (void **)&adev->irq.ih.ring);
- if (r) {
- DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
- return r;
- }
- }
- return 0;
-}
+#include "amdgpu_reset.h"
/**
* amdgpu_ih_ring_init - initialize the IH state
*
* @adev: amdgpu_device pointer
+ * @ih: ih ring to initialize
+ * @ring_size: ring size to allocate
+ * @use_bus_addr: true when we can use dma_alloc_coherent
*
* Initializes the IH state and allocates a buffer
* for the IH ring buffer.
* Returns 0 for success, errors for failure.
*/
-int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
- bool use_bus_addr)
+int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ unsigned ring_size, bool use_bus_addr)
{
u32 rb_bufsz;
int r;
@@ -71,122 +48,270 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
/* Align ring size */
rb_bufsz = order_base_2(ring_size / 4);
ring_size = (1 << rb_bufsz) * 4;
- adev->irq.ih.ring_size = ring_size;
- adev->irq.ih.ptr_mask = adev->irq.ih.ring_size - 1;
- adev->irq.ih.rptr = 0;
- adev->irq.ih.use_bus_addr = use_bus_addr;
-
- if (adev->irq.ih.use_bus_addr) {
- if (!adev->irq.ih.ring) {
- /* add 8 bytes for the rptr/wptr shadows and
- * add them to the end of the ring allocation.
- */
- adev->irq.ih.ring = pci_alloc_consistent(adev->pdev,
- adev->irq.ih.ring_size + 8,
- &adev->irq.ih.rb_dma_addr);
- if (adev->irq.ih.ring == NULL)
- return -ENOMEM;
- memset((void *)adev->irq.ih.ring, 0, adev->irq.ih.ring_size + 8);
- adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
- adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
- }
- return 0;
+ ih->ring_size = ring_size;
+ ih->ptr_mask = ih->ring_size - 1;
+ ih->rptr = 0;
+ ih->use_bus_addr = use_bus_addr;
+
+ if (use_bus_addr) {
+ dma_addr_t dma_addr;
+
+ if (ih->ring)
+ return 0;
+
+ /* add 8 bytes for the rptr/wptr shadows and
+ * add them to the end of the ring allocation.
+ */
+ ih->ring = dma_alloc_coherent(adev->dev, ih->ring_size + 8,
+ &dma_addr, GFP_KERNEL);
+ if (ih->ring == NULL)
+ return -ENOMEM;
+
+ ih->gpu_addr = dma_addr;
+ ih->wptr_addr = dma_addr + ih->ring_size;
+ ih->wptr_cpu = &ih->ring[ih->ring_size / 4];
+ ih->rptr_addr = dma_addr + ih->ring_size + 4;
+ ih->rptr_cpu = &ih->ring[(ih->ring_size / 4) + 1];
} else {
- r = amdgpu_wb_get(adev, &adev->irq.ih.wptr_offs);
+ unsigned wptr_offs, rptr_offs;
+
+ r = amdgpu_device_wb_get(adev, &wptr_offs);
+ if (r)
+ return r;
+
+ r = amdgpu_device_wb_get(adev, &rptr_offs);
if (r) {
- dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r);
+ amdgpu_device_wb_free(adev, wptr_offs);
return r;
}
- r = amdgpu_wb_get(adev, &adev->irq.ih.rptr_offs);
+ r = amdgpu_bo_create_kernel(adev, ih->ring_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &ih->ring_obj, &ih->gpu_addr,
+ (void **)&ih->ring);
if (r) {
- amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
- dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r);
+ amdgpu_device_wb_free(adev, rptr_offs);
+ amdgpu_device_wb_free(adev, wptr_offs);
return r;
}
- return amdgpu_ih_ring_alloc(adev);
+ ih->wptr_addr = adev->wb.gpu_addr + wptr_offs * 4;
+ ih->wptr_cpu = &adev->wb.wb[wptr_offs];
+ ih->rptr_addr = adev->wb.gpu_addr + rptr_offs * 4;
+ ih->rptr_cpu = &adev->wb.wb[rptr_offs];
}
+
+ init_waitqueue_head(&ih->wait_process);
+ return 0;
}
/**
* amdgpu_ih_ring_fini - tear down the IH state
*
* @adev: amdgpu_device pointer
+ * @ih: ih ring to tear down
*
* Tears down the IH state and frees buffer
* used for the IH ring buffer.
*/
-void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
+void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
- if (adev->irq.ih.use_bus_addr) {
- if (adev->irq.ih.ring) {
- /* add 8 bytes for the rptr/wptr shadows and
- * add them to the end of the ring allocation.
- */
- pci_free_consistent(adev->pdev, adev->irq.ih.ring_size + 8,
- (void *)adev->irq.ih.ring,
- adev->irq.ih.rb_dma_addr);
- adev->irq.ih.ring = NULL;
- }
+
+ if (!ih->ring)
+ return;
+
+ if (ih->use_bus_addr) {
+
+ /* add 8 bytes for the rptr/wptr shadows and
+ * add them to the end of the ring allocation.
+ */
+ dma_free_coherent(adev->dev, ih->ring_size + 8,
+ (void *)ih->ring, ih->gpu_addr);
+ ih->ring = NULL;
} else {
- amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
- &adev->irq.ih.gpu_addr,
- (void **)&adev->irq.ih.ring);
- amdgpu_wb_free(adev, adev->irq.ih.wptr_offs);
- amdgpu_wb_free(adev, adev->irq.ih.rptr_offs);
+ amdgpu_bo_free_kernel(&ih->ring_obj, &ih->gpu_addr,
+ (void **)&ih->ring);
+ amdgpu_device_wb_free(adev, (ih->wptr_addr - ih->gpu_addr) / 4);
+ amdgpu_device_wb_free(adev, (ih->rptr_addr - ih->gpu_addr) / 4);
+ }
+}
+
+/**
+ * amdgpu_ih_ring_write - write IV to the ring buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: ih ring to write to
+ * @iv: the iv to write
+ * @num_dw: size of the iv in dw
+ *
+ * Writes an IV to the ring buffer using the CPU and increments the wptr.
+ * Used for testing and delegating IVs to a software ring.
+ */
+void amdgpu_ih_ring_write(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ const uint32_t *iv, unsigned int num_dw)
+{
+ uint32_t wptr = le32_to_cpu(*ih->wptr_cpu) >> 2;
+ unsigned int i;
+
+ for (i = 0; i < num_dw; ++i)
+ ih->ring[wptr++] = cpu_to_le32(iv[i]);
+
+ wptr <<= 2;
+ wptr &= ih->ptr_mask;
+
+ /* Only commit the new wptr if we don't overflow */
+ if (wptr != READ_ONCE(ih->rptr)) {
+ wmb();
+ WRITE_ONCE(*ih->wptr_cpu, cpu_to_le32(wptr));
+ } else if (adev->irq.retry_cam_enabled) {
+ dev_warn_once(adev->dev, "IH soft ring buffer overflow 0x%X, 0x%X\n",
+ wptr, ih->rptr);
}
}
/**
+ * amdgpu_ih_wait_on_checkpoint_process_ts - wait to process IVs up to checkpoint
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: ih ring to process
+ *
+ * Used to ensure ring has processed IVs up to the checkpoint write pointer.
+ */
+int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t checkpoint_wptr;
+ uint64_t checkpoint_ts;
+ long timeout = HZ;
+
+ if (!ih->enabled || adev->shutdown)
+ return -ENODEV;
+
+ checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
+ /* Order wptr with ring data. */
+ rmb();
+ checkpoint_ts = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
+
+ return wait_event_interruptible_timeout(ih->wait_process,
+ amdgpu_ih_ts_after(checkpoint_ts, ih->processed_timestamp) ||
+ ih->rptr == amdgpu_ih_get_wptr(adev, ih), timeout);
+}
+
+/**
* amdgpu_ih_process - interrupt handler
*
* @adev: amdgpu_device pointer
+ * @ih: ih ring to process
*
* Interrupt hander (VI), walk the IH ring.
* Returns irq process return code.
*/
-int amdgpu_ih_process(struct amdgpu_device *adev)
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
- struct amdgpu_iv_entry entry;
+ unsigned int count;
u32 wptr;
- if (!adev->irq.ih.enabled || adev->shutdown)
+ if (!ih->enabled || adev->shutdown)
return IRQ_NONE;
- wptr = amdgpu_ih_get_wptr(adev);
+ wptr = amdgpu_ih_get_wptr(adev, ih);
restart_ih:
- /* is somebody else already processing irqs? */
- if (atomic_xchg(&adev->irq.ih.lock, 1))
- return IRQ_NONE;
-
- DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, adev->irq.ih.rptr, wptr);
+ count = AMDGPU_IH_MAX_NUM_IVS;
+ dev_dbg(adev->dev, "%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
rmb();
- while (adev->irq.ih.rptr != wptr) {
- u32 ring_index = adev->irq.ih.rptr >> 2;
-
- /* Before dispatching irq to IP blocks, send it to amdkfd */
- amdgpu_amdkfd_interrupt(adev,
- (const void *) &adev->irq.ih.ring[ring_index]);
+ while (ih->rptr != wptr && --count) {
+ amdgpu_irq_dispatch(adev, ih);
+ ih->rptr &= ih->ptr_mask;
+ }
- entry.iv_entry = (const uint32_t *)
- &adev->irq.ih.ring[ring_index];
- amdgpu_ih_decode_iv(adev, &entry);
- adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
+ if (!ih->overflow)
+ amdgpu_ih_set_rptr(adev, ih);
- amdgpu_irq_dispatch(adev, &entry);
- }
- amdgpu_ih_set_rptr(adev);
- atomic_set(&adev->irq.ih.lock, 0);
+ wake_up_all(&ih->wait_process);
/* make sure wptr hasn't changed while processing */
- wptr = amdgpu_ih_get_wptr(adev);
- if (wptr != adev->irq.ih.rptr)
- goto restart_ih;
+ wptr = amdgpu_ih_get_wptr(adev, ih);
+ if (wptr != ih->rptr)
+ if (!ih->overflow)
+ goto restart_ih;
+
+ if (ih->overflow)
+ if (amdgpu_sriov_runtime(adev))
+ WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->virt.flr_work),
+ "Failed to queue work! at %s",
+ __func__);
return IRQ_HANDLED;
}
+
+/**
+ * amdgpu_ih_decode_iv_helper - decode an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: ih ring to process
+ * @entry: IV entry
+ *
+ * Decodes the interrupt vector at the current rptr
+ * position and also advances the position for Vega10
+ * and later GPUs.
+ */
+void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ struct amdgpu_iv_entry *entry)
+{
+ /* wptr/rptr are in bytes! */
+ u32 ring_index = ih->rptr >> 2;
+ uint32_t dw[8];
+
+ dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+ dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+ dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
+ dw[4] = le32_to_cpu(ih->ring[ring_index + 4]);
+ dw[5] = le32_to_cpu(ih->ring[ring_index + 5]);
+ dw[6] = le32_to_cpu(ih->ring[ring_index + 6]);
+ dw[7] = le32_to_cpu(ih->ring[ring_index + 7]);
+
+ entry->client_id = dw[0] & 0xff;
+ entry->src_id = (dw[0] >> 8) & 0xff;
+ entry->ring_id = (dw[0] >> 16) & 0xff;
+ entry->vmid = (dw[0] >> 24) & 0xf;
+ entry->vmid_src = (dw[0] >> 31);
+ entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
+ entry->timestamp_src = dw[2] >> 31;
+ entry->pasid = dw[3] & 0xffff;
+ entry->node_id = (dw[3] >> 16) & 0xff;
+ entry->src_data[0] = dw[4];
+ entry->src_data[1] = dw[5];
+ entry->src_data[2] = dw[6];
+ entry->src_data[3] = dw[7];
+
+ /* wptr/rptr are in bytes! */
+ ih->rptr += 32;
+}
+
+uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr,
+ signed int offset)
+{
+ uint32_t iv_size = 32;
+ uint32_t ring_index;
+ uint32_t dw1, dw2;
+
+ rptr += iv_size * offset;
+ ring_index = (rptr & ih->ptr_mask) >> 2;
+
+ dw1 = le32_to_cpu(ih->ring[ring_index + 1]);
+ dw2 = le32_to_cpu(ih->ring[ring_index + 2]);
+ return dw1 | ((u64)(dw2 & 0xffff) << 32);
+}
+
+const char *amdgpu_ih_ring_name(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
+{
+ return ih == &adev->irq.ih ? "ih" : ih == &adev->irq.ih_soft ? "sw ih" :
+ ih == &adev->irq.ih1 ? "ih1" : ih == &adev->irq.ih2 ? "ih2" : "unknown";
+}
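
To show how the decode helpers above are meant to be consumed, an IH block implementation would typically plug them into its amdgpu_ih_funcs table alongside hardware-specific wptr/rptr handling; only the two *_helper() functions below come from this patch, the example_* names are placeholders:

static u32 example_ih_get_wptr(struct amdgpu_device *adev,
			       struct amdgpu_ih_ring *ih)
{
	/* read the write pointer from the shadow the hardware updates */
	return le32_to_cpu(*ih->wptr_cpu) & ih->ptr_mask;
}

static void example_ih_set_rptr(struct amdgpu_device *adev,
				struct amdgpu_ih_ring *ih)
{
	/* write ih->rptr back via doorbell or register, hardware specific */
}

static const struct amdgpu_ih_funcs example_ih_funcs = {
	.get_wptr = example_ih_get_wptr,
	.decode_iv = amdgpu_ih_decode_iv_helper,
	.decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
	.set_rptr = example_ih_set_rptr,
};
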
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index a3da1a122fc8..f58b6be7fccc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -24,89 +24,95 @@
#ifndef __AMDGPU_IH_H__
#define __AMDGPU_IH_H__
+/* Maximum number of IVs processed at once */
+#define AMDGPU_IH_MAX_NUM_IVS 32
+
+#define IH_RING_SIZE (256 * 1024)
+#define IH_SW_RING_SIZE (16 * 1024) /* enough for 512 CAM entries */
+
struct amdgpu_device;
- /*
- * vega10+ IH clients
- */
-enum amdgpu_ih_clientid
-{
- AMDGPU_IH_CLIENTID_IH = 0x00,
- AMDGPU_IH_CLIENTID_ACP = 0x01,
- AMDGPU_IH_CLIENTID_ATHUB = 0x02,
- AMDGPU_IH_CLIENTID_BIF = 0x03,
- AMDGPU_IH_CLIENTID_DCE = 0x04,
- AMDGPU_IH_CLIENTID_ISP = 0x05,
- AMDGPU_IH_CLIENTID_PCIE0 = 0x06,
- AMDGPU_IH_CLIENTID_RLC = 0x07,
- AMDGPU_IH_CLIENTID_SDMA0 = 0x08,
- AMDGPU_IH_CLIENTID_SDMA1 = 0x09,
- AMDGPU_IH_CLIENTID_SE0SH = 0x0a,
- AMDGPU_IH_CLIENTID_SE1SH = 0x0b,
- AMDGPU_IH_CLIENTID_SE2SH = 0x0c,
- AMDGPU_IH_CLIENTID_SE3SH = 0x0d,
- AMDGPU_IH_CLIENTID_SYSHUB = 0x0e,
- AMDGPU_IH_CLIENTID_THM = 0x0f,
- AMDGPU_IH_CLIENTID_UVD = 0x10,
- AMDGPU_IH_CLIENTID_VCE0 = 0x11,
- AMDGPU_IH_CLIENTID_VMC = 0x12,
- AMDGPU_IH_CLIENTID_XDMA = 0x13,
- AMDGPU_IH_CLIENTID_GRBM_CP = 0x14,
- AMDGPU_IH_CLIENTID_ATS = 0x15,
- AMDGPU_IH_CLIENTID_ROM_SMUIO = 0x16,
- AMDGPU_IH_CLIENTID_DF = 0x17,
- AMDGPU_IH_CLIENTID_VCE1 = 0x18,
- AMDGPU_IH_CLIENTID_PWR = 0x19,
- AMDGPU_IH_CLIENTID_UTCL2 = 0x1b,
- AMDGPU_IH_CLIENTID_EA = 0x1c,
- AMDGPU_IH_CLIENTID_UTCL2LOG = 0x1d,
- AMDGPU_IH_CLIENTID_MP0 = 0x1e,
- AMDGPU_IH_CLIENTID_MP1 = 0x1f,
-
- AMDGPU_IH_CLIENTID_MAX
+struct amdgpu_iv_entry;
+struct amdgpu_ih_regs {
+ uint32_t ih_rb_base;
+ uint32_t ih_rb_base_hi;
+ uint32_t ih_rb_cntl;
+ uint32_t ih_rb_wptr;
+ uint32_t ih_rb_rptr;
+ uint32_t ih_doorbell_rptr;
+ uint32_t ih_rb_wptr_addr_lo;
+ uint32_t ih_rb_wptr_addr_hi;
+ uint32_t psp_reg_id;
};
-#define AMDGPU_IH_CLIENTID_LEGACY 0
-
/*
* R6xx+ IH ring
*/
struct amdgpu_ih_ring {
- struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
- unsigned rptr;
unsigned ring_size;
- uint64_t gpu_addr;
uint32_t ptr_mask;
- atomic_t lock;
- bool enabled;
- unsigned wptr_offs;
- unsigned rptr_offs;
u32 doorbell_index;
bool use_doorbell;
bool use_bus_addr;
- dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */
+
+ struct amdgpu_bo *ring_obj;
+ uint32_t *ring;
+ uint64_t gpu_addr;
+
+ uint64_t wptr_addr;
+ uint32_t *wptr_cpu;
+
+ uint64_t rptr_addr;
+ uint32_t *rptr_cpu;
+
+ bool enabled;
+ unsigned rptr;
+ struct amdgpu_ih_regs ih_regs;
+
+ /* For waiting on IH processing at checkpoint. */
+ wait_queue_head_t wait_process;
+ uint64_t processed_timestamp;
+ bool overflow;
};
-#define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4
-
-struct amdgpu_iv_entry {
- unsigned client_id;
- unsigned src_id;
- unsigned ring_id;
- unsigned vm_id;
- unsigned vm_id_src;
- uint64_t timestamp;
- unsigned timestamp_src;
- unsigned pas_id;
- unsigned pasid_src;
- unsigned src_data[AMDGPU_IH_SRC_DATA_MAX_SIZE_DW];
- const uint32_t *iv_entry;
+/* return true if time stamp t2 is after t1 with 48bit wrap around */
+#define amdgpu_ih_ts_after(t1, t2) \
+ (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL)
+
+#define amdgpu_ih_ts_after_or_equal(t1, t2) \
+ (((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) >= 0LL)
+
+/* provided by the ih block */
+struct amdgpu_ih_funcs {
+ /* ring read/write ptr handling, called from interrupt context */
+ u32 (*get_wptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
+ void (*decode_iv)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ struct amdgpu_iv_entry *entry);
+ uint64_t (*decode_iv_ts)(struct amdgpu_ih_ring *ih, u32 rptr,
+ signed int offset);
+ void (*set_rptr)(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
};
-int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
- bool use_bus_addr);
-void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
-int amdgpu_ih_process(struct amdgpu_device *adev);
+#define amdgpu_ih_get_wptr(adev, ih) (adev)->irq.ih_funcs->get_wptr((adev), (ih))
+#define amdgpu_ih_decode_iv(adev, iv) \
+ (adev)->irq.ih_funcs->decode_iv((adev), (ih), (iv))
+#define amdgpu_ih_decode_iv_ts(adev, ih, rptr, offset) \
+ (WARN_ON_ONCE(!(adev)->irq.ih_funcs->decode_iv_ts) ? 0 : \
+ (adev)->irq.ih_funcs->decode_iv_ts((ih), (rptr), (offset)))
+#define amdgpu_ih_set_rptr(adev, ih) (adev)->irq.ih_funcs->set_rptr((adev), (ih))
+int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ unsigned ring_size, bool use_bus_addr);
+void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
+void amdgpu_ih_ring_write(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+ const uint32_t *iv, unsigned int num_dw);
+int amdgpu_ih_wait_on_checkpoint_process_ts(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih);
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
+void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ struct amdgpu_iv_entry *entry);
+uint64_t amdgpu_ih_decode_iv_ts_helper(struct amdgpu_ih_ring *ih, u32 rptr,
+ signed int offset);
+const char *amdgpu_ih_ring_name(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
#endif
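
The amdgpu_ih_ts_after() macro above compares 48-bit timestamps by shifting them into the top of a signed 64-bit value, so the subtraction stays correct across wrap-around; a small stand-alone check of that property (the timestamp values are made up):

#include <assert.h>
#include <stdint.h>

#define ih_ts_after(t1, t2) \
	(((int64_t)((t2) << 16) - (int64_t)((t1) << 16)) > 0LL)

int main(void)
{
	uint64_t near_wrap = 0xFFFFFFFFFFFFULL;	/* close to the 48-bit maximum */
	uint64_t wrapped   = 0x10ULL;		/* shortly after wrap-around */

	assert(ih_ts_after(100ULL, 200ULL));	  /* plain ordering */
	assert(ih_ts_after(near_wrap, wrapped));  /* still ordered across the wrap */
	assert(!ih_ts_after(wrapped, near_wrap));
	return 0;
}
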
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_imu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_imu.h
new file mode 100644
index 000000000000..484e936812e4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_imu.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_IMU_H__
+#define __AMDGPU_IMU_H__
+
+enum imu_work_mode {
+ DEBUG_MODE,
+ MISSION_MODE
+};
+
+struct amdgpu_imu_funcs {
+ int (*init_microcode)(struct amdgpu_device *adev);
+ int (*load_microcode)(struct amdgpu_device *adev);
+ void (*setup_imu)(struct amdgpu_device *adev);
+ int (*start_imu)(struct amdgpu_device *adev);
+ void (*program_rlc_ram)(struct amdgpu_device *adev);
+ int (*wait_for_reset_status)(struct amdgpu_device *adev);
+};
+
+struct imu_rlc_ram_golden {
+ u32 hwip;
+ u32 instance;
+ u32 segment;
+ u32 reg;
+ u32 data;
+ u32 addr_mask;
+};
+
+#define IMU_RLC_RAM_GOLDEN_VALUE(ip, inst, reg, data, addr_mask) \
+ { ip##_HWIP, inst, reg##_BASE_IDX, reg, data, addr_mask }
+
+struct amdgpu_imu {
+ const struct amdgpu_imu_funcs *funcs;
+ enum imu_work_mode mode;
+};
+
+#endif
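
The IMU_RLC_RAM_GOLDEN_VALUE() macro above pastes a register token into the {hwip, instance, base index, register, data, mask} tuple; a hedged illustration with invented register defines (real entries use the generated register headers, not these names):

/* stand-ins for generated register headers, for illustration only */
#define regEXAMPLE_CTRL          0x1234
#define regEXAMPLE_CTRL_BASE_IDX 1

static const struct imu_rlc_ram_golden example_imu_golden[] = {
	/* expands to { GC_HWIP, 0, 1, 0x1234, 0x00000001, 0xffffffff } */
	IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regEXAMPLE_CTRL, 0x00000001, 0xffffffff),
};
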
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c
index 26482914dc4b..a1cbd7c3deb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ioc32.c
@@ -1,4 +1,4 @@
-/**
+/*
* \file amdgpu_ioc32.c
*
* 32-bit ioctl compatibility routines for the AMDGPU DRM.
@@ -29,19 +29,17 @@
*/
#include <linux/compat.h>
-#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
+#include <drm/drm_ioctl.h>
+
#include "amdgpu_drv.h"
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
- int ret;
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
- ret = amdgpu_drm_ioctl(filp, cmd, arg);
-
- return ret;
+ return amdgpu_drm_ioctl(filp, cmd, arg);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c
new file mode 100644
index 000000000000..99e1cf4fc955
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ip.h"
+
+static int8_t amdgpu_logical_to_dev_inst(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ int8_t inst)
+{
+ int8_t dev_inst;
+
+ switch (block) {
+ case GC_HWIP:
+ case SDMA0_HWIP:
+ /* Both JPEG and VCN as JPEG is only alias of VCN */
+ case VCN_HWIP:
+ dev_inst = adev->ip_map.dev_inst[block][inst];
+ break;
+ default:
+ /* For rest of the IPs, no look up required.
+ * Assume 'logical instance == physical instance' for all configs. */
+ dev_inst = inst;
+ break;
+ }
+
+ return dev_inst;
+}
+
+static uint32_t amdgpu_logical_to_dev_mask(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type block,
+ uint32_t mask)
+{
+ uint32_t dev_mask = 0;
+ int8_t log_inst, dev_inst;
+
+ while (mask) {
+ log_inst = ffs(mask) - 1;
+ dev_inst = amdgpu_logical_to_dev_inst(adev, block, log_inst);
+ dev_mask |= (1 << dev_inst);
+ mask &= ~(1 << log_inst);
+ }
+
+ return dev_mask;
+}
+
+static void amdgpu_populate_ip_map(struct amdgpu_device *adev,
+ enum amd_hw_ip_block_type ip_block,
+ uint32_t inst_mask)
+{
+ int l = 0, i;
+
+ while (inst_mask) {
+ i = ffs(inst_mask) - 1;
+ adev->ip_map.dev_inst[ip_block][l++] = i;
+ inst_mask &= ~(1 << i);
+ }
+ for (; l < HWIP_MAX_INSTANCE; l++)
+ adev->ip_map.dev_inst[ip_block][l] = -1;
+}
+
+void amdgpu_ip_map_init(struct amdgpu_device *adev)
+{
+ u32 ip_map[][2] = {
+ { GC_HWIP, adev->gfx.xcc_mask },
+ { SDMA0_HWIP, adev->sdma.sdma_mask },
+ { VCN_HWIP, adev->vcn.inst_mask },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ip_map); ++i)
+ amdgpu_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]);
+
+ adev->ip_map.logical_to_dev_inst = amdgpu_logical_to_dev_inst;
+ adev->ip_map.logical_to_dev_mask = amdgpu_logical_to_dev_mask;
+}
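
To make the logical-to-device mapping above concrete: with a hypothetical instance mask of 0xB (device instances 0, 1 and 3 present), the population loop packs logical instances 0, 1, 2 onto device instances 0, 1, 3 and marks the remainder -1; a stand-alone model of that loop (not driver code) shows the effect:

#include <assert.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

#define MAX_INST 8

int main(void)
{
	int8_t dev_inst[MAX_INST];
	uint32_t mask = 0xB;	/* 0b1011: device instances 0, 1 and 3 */
	int l = 0, i;

	while (mask) {
		i = ffs(mask) - 1;
		dev_inst[l++] = i;
		mask &= ~(1u << i);
	}
	for (; l < MAX_INST; l++)
		dev_inst[l] = -1;

	assert(dev_inst[0] == 0 && dev_inst[1] == 1 && dev_inst[2] == 3);
	assert(dev_inst[3] == -1);
	return 0;
}
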
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h
new file mode 100644
index 000000000000..2490fd322aec
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_IP_H__
+#define __AMDGPU_IP_H__
+
+void amdgpu_ip_map_init(struct amdgpu_device *adev);
+
+#endif /* __AMDGPU_IP_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index a6b7e367a860..8112ffc85995 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -25,76 +25,110 @@
* Alex Deucher
* Jerome Glisse
*/
+
+/**
+ * DOC: Interrupt Handling
+ *
+ * Interrupts generated within GPU hardware raise interrupt requests that are
+ * passed to the amdgpu IRQ handler, which is responsible for detecting the
+ * source and type of the interrupt and dispatching matching handlers. If
+ * handling an interrupt requires calling kernel functions that may sleep,
+ * processing is dispatched to work handlers.
+ *
+ * If MSI functionality is not disabled by module parameter then MSI
+ * support will be enabled.
+ *
+ * For GPU interrupt sources that may be driven by another driver, IRQ domain
+ * support is used (with mapping between virtual and hardware IRQs).
+ */
+
#include <linux/irq.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+#include <linux/pci.h>
+
+#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_ras.h"
#include <linux/pm_runtime.h>
+#ifdef CONFIG_DRM_AMD_DC
+#include "amdgpu_dm_irq.h"
+#endif
+
#define AMDGPU_WAIT_IDLE_TIMEOUT 200
-/*
- * Handle hotplug events outside the interrupt handler proper.
- */
-/**
- * amdgpu_hotplug_work_func - display hotplug work handler
- *
- * @work: work struct
- *
- * This is the hot plug event work handler (all asics).
- * The work gets scheduled from the irq handler if there
- * was a hot plug interrupt. It walks the connector table
- * and calls the hotplug handler for each one, then sends
- * a drm hotplug event to alert userspace.
- */
-static void amdgpu_hotplug_work_func(struct work_struct *work)
-{
- struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
- hotplug_work);
- struct drm_device *dev = adev->ddev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *connector;
-
- mutex_lock(&mode_config->mutex);
- list_for_each_entry(connector, &mode_config->connector_list, head)
- amdgpu_connector_hotplug(connector);
- mutex_unlock(&mode_config->mutex);
- /* Just fire off a uevent and let userspace tell us what to do */
- drm_helper_hpd_irq_event(dev);
-}
+const char *soc15_ih_clientid_name[] = {
+ "IH",
+ "SDMA2 or ACP",
+ "ATHUB",
+ "BIF",
+ "SDMA3 or DCE",
+ "SDMA4 or ISP",
+ "VMC1 or PCIE0",
+ "RLC",
+ "SDMA0",
+ "SDMA1",
+ "SE0SH",
+ "SE1SH",
+ "SE2SH",
+ "SE3SH",
+ "VCN1 or UVD1",
+ "THM",
+ "VCN or UVD",
+ "SDMA5 or VCE0",
+ "VMC",
+ "SDMA6 or XDMA",
+ "GRBM_CP",
+ "ATS",
+ "ROM_SMUIO",
+ "DF",
+ "SDMA7 or VCE1",
+ "PWR",
+ "reserved",
+ "UTCL2",
+ "EA",
+ "UTCL2LOG",
+ "MP0",
+ "MP1"
+};
+
+const int node_id_to_phys_map[NODEID_MAX] = {
+ [AID0_NODEID] = 0,
+ [XCD0_NODEID] = 0,
+ [XCD1_NODEID] = 1,
+ [AID1_NODEID] = 1,
+ [XCD2_NODEID] = 2,
+ [XCD3_NODEID] = 3,
+ [AID2_NODEID] = 2,
+ [XCD4_NODEID] = 4,
+ [XCD5_NODEID] = 5,
+ [AID3_NODEID] = 3,
+ [XCD6_NODEID] = 6,
+ [XCD7_NODEID] = 7,
+};
/**
- * amdgpu_irq_reset_work_func - execute gpu reset
+ * amdgpu_irq_disable_all - disable *all* interrupts
*
- * @work: work struct
+ * @adev: amdgpu device pointer
*
- * Execute scheduled gpu reset (cayman+).
- * This function is called when the irq handler
- * thinks we need a gpu reset.
+ * Disable all types of interrupts from all sources.
*/
-static void amdgpu_irq_reset_work_func(struct work_struct *work)
-{
- struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
- reset_work);
-
- amdgpu_gpu_reset(adev);
-}
-
-/* Disable *all* interrupts */
-static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
+void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
unsigned long irqflags;
- unsigned i, j, k;
+ unsigned int i, j, k;
int r;
spin_lock_irqsave(&adev->irq.lock, irqflags);
- for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+ for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
if (!adev->irq.client[i].sources)
continue;
@@ -105,12 +139,12 @@ static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
continue;
for (k = 0; k < src->num_types; ++k) {
- atomic_set(&src->enabled_types[k], 0);
r = src->funcs->set(adev, src, k,
AMDGPU_IRQ_STATE_DISABLE);
if (r)
- DRM_ERROR("error disabling interrupt (%d)\n",
- r);
+ dev_err(adev->dev,
+ "error disabling interrupt (%d)\n",
+ r);
}
}
}
@@ -118,86 +152,89 @@ static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_preinstall - drm irq preinstall callback
+ * amdgpu_irq_handler - IRQ handler
*
- * @dev: drm dev pointer
+ * @irq: IRQ number (unused)
+ * @arg: pointer to DRM device
*
- * Gets the hw ready to enable irqs (all asics).
- * This function disables all interrupt sources on the GPU.
+ * IRQ handler for amdgpu driver (all ASICs).
+ *
+ * Returns:
+ * result of handling the IRQ, as defined by &irqreturn_t
*/
-void amdgpu_irq_preinstall(struct drm_device *dev)
+static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct drm_device *dev = (struct drm_device *) arg;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ irqreturn_t ret;
+
+ ret = amdgpu_ih_process(adev, &adev->irq.ih);
+ if (ret == IRQ_HANDLED)
+ pm_runtime_mark_last_busy(dev->dev);
- /* Disable *all* interrupts */
- amdgpu_irq_disable_all(adev);
- /* Clear bits */
- amdgpu_ih_process(adev);
+ amdgpu_ras_interrupt_fatal_error_handler(adev);
+
+ return ret;
}
/**
- * amdgpu_irq_postinstall - drm irq preinstall callback
+ * amdgpu_irq_handle_ih1 - kick off processing for IH1
*
- * @dev: drm dev pointer
+ * @work: work structure in struct amdgpu_irq
*
- * Handles stuff to be done after enabling irqs (all asics).
- * Returns 0 on success.
+ * Kick off processing of IH ring 1.
*/
-int amdgpu_irq_postinstall(struct drm_device *dev)
+static void amdgpu_irq_handle_ih1(struct work_struct *work)
{
- dev->max_vblank_count = 0x00ffffff;
- return 0;
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+ irq.ih1_work);
+
+ amdgpu_ih_process(adev, &adev->irq.ih1);
}
/**
- * amdgpu_irq_uninstall - drm irq uninstall callback
+ * amdgpu_irq_handle_ih2 - kick off processing for IH2
*
- * @dev: drm dev pointer
+ * @work: work structure in struct amdgpu_irq
*
- * This function disables all interrupt sources on the GPU (all asics).
+ * Kick off processing of IH ring 2.
*/
-void amdgpu_irq_uninstall(struct drm_device *dev)
+static void amdgpu_irq_handle_ih2(struct work_struct *work)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+ irq.ih2_work);
- if (adev == NULL) {
- return;
- }
- amdgpu_irq_disable_all(adev);
+ amdgpu_ih_process(adev, &adev->irq.ih2);
}
/**
- * amdgpu_irq_handler - irq handler
+ * amdgpu_irq_handle_ih_soft - kick off processing for ih_soft
*
- * @int irq, void *arg: args
+ * @work: work structure in struct amdgpu_irq
*
- * This is the irq handler for the amdgpu driver (all asics).
+ * Kick off processing of the IH soft ring.
*/
-irqreturn_t amdgpu_irq_handler(int irq, void *arg)
+static void amdgpu_irq_handle_ih_soft(struct work_struct *work)
{
- struct drm_device *dev = (struct drm_device *) arg;
- struct amdgpu_device *adev = dev->dev_private;
- irqreturn_t ret;
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
+ irq.ih_soft_work);
- ret = amdgpu_ih_process(adev);
- if (ret == IRQ_HANDLED)
- pm_runtime_mark_last_busy(dev->dev);
- return ret;
+ amdgpu_ih_process(adev, &adev->irq.ih_soft);
}
/**
- * amdgpu_msi_ok - asic specific msi checks
+ * amdgpu_msi_ok - check whether MSI functionality is enabled
*
- * @adev: amdgpu device pointer
+ * @adev: amdgpu device pointer (unused)
*
- * Handles asic specific MSI checks to determine if
- * MSIs should be enabled on a particular chip (all asics).
- * Returns true if MSIs should be enabled, false if MSIs
- * should not be enabled.
+ * Checks whether MSI functionality has been disabled via module parameter
+ * (all ASICs).
+ *
+ * Returns:
+ * *true* if MSIs are allowed to be enabled or *false* otherwise
*/
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
- /* force MSI on */
if (amdgpu_msi == 1)
return true;
else if (amdgpu_msi == 0)
@@ -206,73 +243,119 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev)
return true;
}
+void amdgpu_restore_msix(struct amdgpu_device *adev)
+{
+ u16 ctrl;
+
+ pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ if (!(ctrl & PCI_MSIX_FLAGS_ENABLE))
+ return;
+
+ /* VF FLR */
+ ctrl &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+ ctrl |= PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+}
+
/**
- * amdgpu_irq_init - init driver interrupt info
+ * amdgpu_irq_init - initialize interrupt handling
*
* @adev: amdgpu device pointer
*
- * Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
- * Returns 0 for success, error for failure.
+ * Sets up the IH ring work functions, enables MSI functionality if not
+ * disabled by module parameter and requests the interrupt vector.
+ *
+ * Returns:
+ * 0 on success or error code on failure
*/
int amdgpu_irq_init(struct amdgpu_device *adev)
{
- int r = 0;
+ unsigned int irq, flags;
+ int r;
spin_lock_init(&adev->irq.lock);
- r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
- if (r) {
- return r;
- }
- /* enable msi */
+ /* Enable MSI if not disabled by module parameter */
adev->irq.msi_enabled = false;
+ if (!amdgpu_msi_ok(adev))
+ flags = PCI_IRQ_INTX;
+ else
+ flags = PCI_IRQ_ALL_TYPES;
+
+ /* we only need one vector */
+ r = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
+ if (r < 0) {
+ dev_err(adev->dev, "Failed to alloc msi vectors\n");
+ return r;
+ }
+
if (amdgpu_msi_ok(adev)) {
- int ret = pci_enable_msi(adev->pdev);
- if (!ret) {
- adev->irq.msi_enabled = true;
- dev_info(adev->dev, "amdgpu: using MSI.\n");
- }
+ adev->irq.msi_enabled = true;
+ dev_dbg(adev->dev, "using MSI/MSI-X.\n");
}
- INIT_WORK(&adev->hotplug_work, amdgpu_hotplug_work_func);
- INIT_WORK(&adev->reset_work, amdgpu_irq_reset_work_func);
+ INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
+ INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
+ INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);
+
+ /* Use vector 0 for MSI-X. */
+ r = pci_irq_vector(adev->pdev, 0);
+ if (r < 0)
+ goto free_vectors;
+ irq = r;
+
+ /* PCI devices require shared interrupts. */
+ r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
+ adev_to_drm(adev));
+ if (r)
+ goto free_vectors;
adev->irq.installed = true;
- r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq);
- if (r) {
+ adev->irq.irq = irq;
+ adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
+
+ dev_dbg(adev->dev, "amdgpu: irq initialized.\n");
+ return 0;
+
+free_vectors:
+ if (adev->irq.msi_enabled)
+ pci_free_irq_vectors(adev->pdev);
+
+ adev->irq.msi_enabled = false;
+ return r;
+}
+
+void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
+{
+ if (adev->irq.installed) {
+ free_irq(adev->irq.irq, adev_to_drm(adev));
adev->irq.installed = false;
- flush_work(&adev->hotplug_work);
- cancel_work_sync(&adev->reset_work);
- return r;
+ if (adev->irq.msi_enabled)
+ pci_free_irq_vectors(adev->pdev);
}
- DRM_INFO("amdgpu: irq initialized.\n");
- return 0;
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih_soft);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
}
/**
- * amdgpu_irq_fini - tear down driver interrupt info
+ * amdgpu_irq_fini_sw - shut down interrupt handling
*
* @adev: amdgpu device pointer
*
- * Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
+ * Frees the per-client interrupt source tables and their enabled-type
+ * counters (all ASICs).
*/
-void amdgpu_irq_fini(struct amdgpu_device *adev)
+void amdgpu_irq_fini_sw(struct amdgpu_device *adev)
{
- unsigned i, j;
+ unsigned int i, j;
- drm_vblank_cleanup(adev->ddev);
- if (adev->irq.installed) {
- drm_irq_uninstall(adev->ddev);
- adev->irq.installed = false;
- if (adev->irq.msi_enabled)
- pci_disable_msi(adev->pdev);
- flush_work(&adev->hotplug_work);
- cancel_work_sync(&adev->reset_work);
- }
-
- for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+ for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
if (!adev->irq.client[i].sources)
continue;
@@ -284,29 +367,30 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
kfree(src->enabled_types);
src->enabled_types = NULL;
- if (src->data) {
- kfree(src->data);
- kfree(src);
- adev->irq.client[i].sources[j] = NULL;
- }
}
kfree(adev->irq.client[i].sources);
+ adev->irq.client[i].sources = NULL;
}
}
/**
- * amdgpu_irq_add_id - register irq source
+ * amdgpu_irq_add_id - register IRQ source
*
* @adev: amdgpu device pointer
- * @src_id: source id for this source
- * @source: irq source
+ * @client_id: client id
+ * @src_id: source id
+ * @source: IRQ source pointer
+ *
+ * Registers IRQ source on a client.
*
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_add_id(struct amdgpu_device *adev,
- unsigned client_id, unsigned src_id,
+ unsigned int client_id, unsigned int src_id,
struct amdgpu_irq_src *source)
{
- if (client_id >= AMDGPU_IH_CLIENTID_MAX)
+ if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
return -EINVAL;
if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
@@ -343,65 +427,107 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
}
/**
- * amdgpu_irq_dispatch - dispatch irq to IP blocks
+ * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
*
* @adev: amdgpu device pointer
- * @entry: interrupt vector
+ * @ih: interrupt ring instance
*
- * Dispatches the irq to the different IP blocks
+ * Dispatches IRQ to IP blocks.
*/
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry)
+ struct amdgpu_ih_ring *ih)
{
- unsigned client_id = entry->client_id;
- unsigned src_id = entry->src_id;
+ u32 ring_index = ih->rptr >> 2;
+ struct amdgpu_iv_entry entry;
+ unsigned int client_id, src_id;
struct amdgpu_irq_src *src;
+ bool handled = false;
int r;
- trace_amdgpu_iv(entry);
+ entry.ih = ih;
+ entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
- if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
- DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
- return;
- }
+ /*
+ * timestamp is not supported on some legacy SOCs (cik, cz, iceland,
+ * si and tonga), so initialize timestamp and timestamp_src to 0
+ */
+ entry.timestamp = 0;
+ entry.timestamp_src = 0;
- if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
- DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
- return;
- }
+ amdgpu_ih_decode_iv(adev, &entry);
- if (adev->irq.virq[src_id]) {
- generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
- } else {
- if (!adev->irq.client[client_id].sources) {
- DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
- client_id, src_id);
- return;
- }
+ trace_amdgpu_iv(ih - &adev->irq.ih, &entry);
- src = adev->irq.client[client_id].sources[src_id];
- if (!src) {
- DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
- return;
- }
+ client_id = entry.client_id;
+ src_id = entry.src_id;
+
+ if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
+ dev_dbg(adev->dev, "Invalid client_id in IV: %d\n", client_id);
+
+ } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
+ dev_dbg(adev->dev, "Invalid src_id in IV: %d\n", src_id);
+
+ } else if (((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) ||
+ (client_id == SOC15_IH_CLIENTID_ISP)) &&
+ adev->irq.virq[src_id]) {
+ generic_handle_domain_irq(adev->irq.domain, src_id);
+
+ } else if (!adev->irq.client[client_id].sources) {
+ dev_dbg(adev->dev,
+ "Unregistered interrupt client_id: %d src_id: %d\n",
+ client_id, src_id);
+
+ } else if ((src = adev->irq.client[client_id].sources[src_id])) {
+ r = src->funcs->process(adev, src, &entry);
+ if (r < 0)
+ dev_err(adev->dev, "error processing interrupt (%d)\n",
+ r);
+ else if (r)
+ handled = true;
- r = src->funcs->process(adev, src, entry);
- if (r)
- DRM_ERROR("error processing interrupt (%d)\n", r);
+ } else {
+ dev_dbg(adev->dev,
+ "Unregistered interrupt src_id: %d of client_id:%d\n",
+ src_id, client_id);
}
+
+ /* Send it to amdkfd as well if it isn't already handled */
+ if (!handled)
+ amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
+
+ if (amdgpu_ih_ts_after(ih->processed_timestamp, entry.timestamp))
+ ih->processed_timestamp = entry.timestamp;
+}
+
+/**
+ * amdgpu_irq_delegate - delegate IV to soft IH ring
+ *
+ * @adev: amdgpu device pointer
+ * @entry: IV entry
+ * @num_dw: size of IV
+ *
+ * Delegate the IV to the soft IH ring and schedule processing of it. Used
+ * if the hardware delegation to IH1 or IH2 doesn't work for some reason.
+ */
+void amdgpu_irq_delegate(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry,
+ unsigned int num_dw)
+{
+ amdgpu_ih_ring_write(adev, &adev->irq.ih_soft, entry->iv_entry, num_dw);
+ schedule_work(&adev->irq.ih_soft_work);
}
/**
- * amdgpu_irq_update - update hw interrupt state
+ * amdgpu_irq_update - update hardware interrupt state
*
* @adev: amdgpu device pointer
- * @src: interrupt src you want to enable
- * @type: type of interrupt you want to update
+ * @src: interrupt source pointer
+ * @type: type of interrupt
*
- * Updates the interrupt state for a specific src (all asics).
+ * Updates interrupt state for the specific source (all ASICs).
*/
int amdgpu_irq_update(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src, unsigned type)
+ struct amdgpu_irq_src *src, unsigned int type)
{
unsigned long irqflags;
enum amdgpu_interrupt_state state;
@@ -409,8 +535,9 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
spin_lock_irqsave(&adev->irq.lock, irqflags);
- /* we need to determine after taking the lock, otherwise
- we might disable just enabled interrupts again */
+ /* We need to determine after taking the lock, otherwise
+ * we might disable just enabled interrupts again
+ */
if (amdgpu_irq_enabled(adev, src, type))
state = AMDGPU_IRQ_STATE_ENABLE;
else
@@ -421,18 +548,29 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
return r;
}
+/**
+ * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Updates state of all types of interrupts on all sources on resume after
+ * reset.
+ */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
int i, j, k;
- for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+ if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+ amdgpu_restore_msix(adev);
+
+ for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
if (!adev->irq.client[i].sources)
continue;
for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
- if (!src)
+ if (!src || !src->funcs || !src->funcs->set)
continue;
for (k = 0; k < src->num_types; k++)
amdgpu_irq_update(adev, src, k);
@@ -444,15 +582,18 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
* amdgpu_irq_get - enable interrupt
*
* @adev: amdgpu device pointer
- * @src: interrupt src you want to enable
- * @type: type of interrupt you want to enable
+ * @src: interrupt source pointer
+ * @type: type of interrupt
*
- * Enables the interrupt type for a specific src (all asics).
+ * Enables specified type of interrupt on the specified source (all ASICs).
+ *
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
- unsigned type)
+ unsigned int type)
{
- if (!adev->ddev->irq_enabled)
+ if (!adev->irq.installed)
return -ENOENT;
if (type >= src->num_types)
@@ -471,15 +612,22 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
* amdgpu_irq_put - disable interrupt
*
* @adev: amdgpu device pointer
- * @src: interrupt src you want to disable
- * @type: type of interrupt you want to disable
+ * @src: interrupt source pointer
+ * @type: type of interrupt
+ *
+ * Disables the specified type of interrupt on the specified source (all ASICs).
*
- * Disables the interrupt type for a specific src (all asics).
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
- unsigned type)
+ unsigned int type)
{
- if (!adev->ddev->irq_enabled)
+ /* When the threshold is reached, the interrupt source may not be enabled. Return -EINVAL. */
+ if (amdgpu_ras_is_rma(adev) && !amdgpu_irq_enabled(adev, src, type))
+ return -EINVAL;
+
+ if (!adev->irq.installed)
return -ENOENT;
if (type >= src->num_types)
@@ -488,6 +636,9 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
if (!src->enabled_types || !src->funcs->set)
return -EINVAL;
+ if (WARN_ON(!amdgpu_irq_enabled(adev, src, type)))
+ return -EINVAL;
+
if (atomic_dec_and_test(&src->enabled_types[type]))
return amdgpu_irq_update(adev, src, type);
@@ -495,17 +646,22 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
}
/**
- * amdgpu_irq_enabled - test if irq is enabled or not
+ * amdgpu_irq_enabled - check whether interrupt is enabled or not
*
* @adev: amdgpu device pointer
- * @idx: interrupt src you want to test
+ * @src: interrupt source pointer
+ * @type: type of interrupt
*
- * Tests if the given interrupt source is enabled or not
+ * Checks whether the given type of interrupt is enabled on the given source.
+ *
+ * Returns:
+ * *true* if interrupt is enabled, *false* if interrupt is disabled or on
+ * invalid parameters
*/
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
- unsigned type)
+ unsigned int type)
{
- if (!adev->ddev->irq_enabled)
+ if (!adev->irq.installed)
return false;
if (type >= src->num_types)
@@ -517,7 +673,7 @@ bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
return !!atomic_read(&src->enabled_types[type]);
}
-/* gen irq */
+/* XXX: Generic IRQ handling */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
/* XXX */
@@ -528,12 +684,26 @@ static void amdgpu_irq_unmask(struct irq_data *irqd)
/* XXX */
}
+/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
.name = "amdgpu-ih",
.irq_mask = amdgpu_irq_mask,
.irq_unmask = amdgpu_irq_unmask,
};
+/**
+ * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
+ *
+ * @d: amdgpu IRQ domain pointer (unused)
+ * @irq: virtual IRQ number
+ * @hwirq: hardware irq number
+ *
+ * Current implementation assigns simple interrupt handler to the given virtual
+ * IRQ.
+ *
+ * Returns:
+ * 0 on success or error code otherwise
+ */
static int amdgpu_irqdomain_map(struct irq_domain *d,
unsigned int irq, irq_hw_number_t hwirq)
{
@@ -545,24 +715,28 @@ static int amdgpu_irqdomain_map(struct irq_domain *d,
return 0;
}
+/* Implementation of methods for amdgpu IRQ domain */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
.map = amdgpu_irqdomain_map,
};
/**
- * amdgpu_irq_add_domain - create a linear irq domain
+ * amdgpu_irq_add_domain - create a linear IRQ domain
*
* @adev: amdgpu device pointer
*
- * Create an irq domain for GPU interrupt sources
+ * Creates an IRQ domain for GPU interrupt sources
* that may be driven by another driver (e.g., ACP).
+ *
+ * Returns:
+ * 0 on success or error code otherwise
*/
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
- adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
- &amdgpu_hw_irqdomain_ops, adev);
+ adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
+ &amdgpu_hw_irqdomain_ops, adev);
if (!adev->irq.domain) {
- DRM_ERROR("GPU irq add domain failed\n");
+ dev_err(adev->dev, "GPU irq add domain failed\n");
return -ENODEV;
}
@@ -570,11 +744,11 @@ int amdgpu_irq_add_domain(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_remove_domain - remove the irq domain
+ * amdgpu_irq_remove_domain - remove the IRQ domain
*
* @adev: amdgpu device pointer
*
- * Remove the irq domain for GPU interrupt sources
+ * Removes the IRQ domain for GPU interrupt sources
* that may be driven by another driver (e.g., ACP).
*/
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
@@ -586,18 +760,19 @@ void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
}
/**
- * amdgpu_irq_create_mapping - create a mapping between a domain irq and a
- * Linux irq
+ * amdgpu_irq_create_mapping - create a mapping between a domain IRQ and a Linux IRQ
*
* @adev: amdgpu device pointer
* @src_id: IH source id
*
- * Create a mapping between a domain irq (GPU IH src id) and a Linux irq
+ * Creates mapping between a domain IRQ (GPU IH src id) and a Linux IRQ
* Use this for components that generate a GPU interrupt, but are driven
* by a different driver (e.g., ACP).
- * Returns the Linux irq.
+ *
+ * Returns:
+ * Linux IRQ
*/
-unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
+unsigned int amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned int src_id)
{
adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 0610cc4a9788..9f0417456abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -25,24 +25,43 @@
#define __AMDGPU_IRQ_H__
#include <linux/irqdomain.h>
+#include "soc15_ih_clientid.h"
#include "amdgpu_ih.h"
-#define AMDGPU_MAX_IRQ_SRC_ID 0x100
+#define AMDGPU_MAX_IRQ_SRC_ID 0x100
#define AMDGPU_MAX_IRQ_CLIENT_ID 0x100
+#define AMDGPU_IRQ_CLIENTID_LEGACY 0
+#define AMDGPU_IRQ_CLIENTID_MAX SOC15_IH_CLIENTID_MAX
+
+#define AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW 4
+
struct amdgpu_device;
-struct amdgpu_iv_entry;
enum amdgpu_interrupt_state {
AMDGPU_IRQ_STATE_DISABLE,
AMDGPU_IRQ_STATE_ENABLE,
};
+struct amdgpu_iv_entry {
+ struct amdgpu_ih_ring *ih;
+ unsigned client_id;
+ unsigned src_id;
+ unsigned ring_id;
+ unsigned vmid;
+ unsigned vmid_src;
+ uint64_t timestamp;
+ unsigned timestamp_src;
+ unsigned pasid;
+ unsigned node_id;
+ unsigned src_data[AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW];
+ const uint32_t *iv_entry;
+};
+
struct amdgpu_irq_src {
unsigned num_types;
atomic_t *enabled_types;
const struct amdgpu_irq_src_funcs *funcs;
- void *data;
};
struct amdgpu_irq_client {
@@ -61,35 +80,59 @@ struct amdgpu_irq_src_funcs {
struct amdgpu_irq {
bool installed;
+ unsigned int irq;
spinlock_t lock;
/* interrupt sources */
- struct amdgpu_irq_client client[AMDGPU_IH_CLIENTID_MAX];
+ struct amdgpu_irq_client client[AMDGPU_IRQ_CLIENTID_MAX];
/* status, etc. */
bool msi_enabled; /* msi enabled */
- /* interrupt ring */
- struct amdgpu_ih_ring ih;
- const struct amdgpu_ih_funcs *ih_funcs;
+ /* interrupt rings */
+ struct amdgpu_ih_ring ih, ih1, ih2, ih_soft;
+ const struct amdgpu_ih_funcs *ih_funcs;
+ struct work_struct ih1_work, ih2_work, ih_soft_work;
+ struct amdgpu_irq_src self_irq;
/* gen irq stuff */
struct irq_domain *domain; /* GPU irq controller domain */
unsigned virq[AMDGPU_MAX_IRQ_SRC_ID];
uint32_t srbm_soft_reset;
+ u32 retry_cam_doorbell_index;
+ bool retry_cam_enabled;
};
-void amdgpu_irq_preinstall(struct drm_device *dev);
-int amdgpu_irq_postinstall(struct drm_device *dev);
-void amdgpu_irq_uninstall(struct drm_device *dev);
-irqreturn_t amdgpu_irq_handler(int irq, void *arg);
+enum interrupt_node_id_per_aid {
+ AID0_NODEID = 0,
+ XCD0_NODEID = 1,
+ XCD1_NODEID = 2,
+ AID1_NODEID = 4,
+ XCD2_NODEID = 5,
+ XCD3_NODEID = 6,
+ AID2_NODEID = 8,
+ XCD4_NODEID = 9,
+ XCD5_NODEID = 10,
+ AID3_NODEID = 12,
+ XCD6_NODEID = 13,
+ XCD7_NODEID = 14,
+ NODEID_MAX,
+};
+
+extern const int node_id_to_phys_map[NODEID_MAX];
+
+void amdgpu_irq_disable_all(struct amdgpu_device *adev);
int amdgpu_irq_init(struct amdgpu_device *adev);
-void amdgpu_irq_fini(struct amdgpu_device *adev);
+void amdgpu_irq_fini_sw(struct amdgpu_device *adev);
+void amdgpu_irq_fini_hw(struct amdgpu_device *adev);
int amdgpu_irq_add_id(struct amdgpu_device *adev,
unsigned client_id, unsigned src_id,
struct amdgpu_irq_src *source);
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry);
+ struct amdgpu_ih_ring *ih);
+void amdgpu_irq_delegate(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry,
+ unsigned int num_dw);
int amdgpu_irq_update(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type);
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
@@ -103,5 +146,6 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev);
int amdgpu_irq_add_domain(struct amdgpu_device *adev);
void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id);
+void amdgpu_restore_msix(struct amdgpu_device *adev);
#endif
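
For illustration only (not part of this diff): a sketch of how an IP block might register and enable an interrupt source with the helpers declared above. The src_id and the source object are placeholders; a real source would also point .funcs at its amdgpu_irq_src_funcs callbacks, omitted here.

static struct amdgpu_irq_src example_irq_src;

static int example_ip_irq_setup(struct amdgpu_device *adev)
{
	int r;

	/* route GPU IH src_id 42 (placeholder) on the legacy client to this source */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &example_irq_src);
	if (r)
		return r;

	/* reference-counted enable of interrupt type 0 */
	return amdgpu_irq_get(adev, &example_irq_src, 0);
}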
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
new file mode 100644
index 000000000000..9cddbf50442a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/mfd/core.h>
+
+#include "amdgpu.h"
+#include "amdgpu_isp.h"
+#include "isp_v4_1_0.h"
+#include "isp_v4_1_1.h"
+
+#define ISP_MC_ADDR_ALIGN (1024 * 32)
+
+/**
+ * isp_hw_init - start and test isp block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ */
+static int isp_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_isp *isp = &adev->isp;
+
+ if (isp->funcs->hw_init != NULL)
+ return isp->funcs->hw_init(isp);
+
+ return -ENODEV;
+}
+
+/**
+ * isp_hw_fini - stop the hardware block
+ *
+ * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
+ *
+ */
+static int isp_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_isp *isp = &ip_block->adev->isp;
+
+ if (isp->funcs->hw_fini != NULL)
+ return isp->funcs->hw_fini(isp);
+
+ return -ENODEV;
+}
+
+static int isp_load_fw_by_psp(struct amdgpu_device *adev)
+{
+ const struct common_firmware_header *hdr;
+ char ucode_prefix[10];
+ int r = 0;
+
+ /* get isp fw binary name and path */
+ amdgpu_ucode_ip_version_decode(adev, ISP_HWIP, ucode_prefix,
+ sizeof(ucode_prefix));
+
+ /* read isp fw */
+ r = amdgpu_ucode_request(adev, &adev->isp.fw, AMDGPU_UCODE_OPTIONAL,
+ "amdgpu/%s.bin", ucode_prefix);
+ if (r) {
+ amdgpu_ucode_release(&adev->isp.fw);
+ return r;
+ }
+
+ hdr = (const struct common_firmware_header *)adev->isp.fw->data;
+
+ adev->firmware.ucode[AMDGPU_UCODE_ID_ISP].ucode_id =
+ AMDGPU_UCODE_ID_ISP;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_ISP].fw = adev->isp.fw;
+
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+
+ return r;
+}
+
+static int isp_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_isp *isp = &adev->isp;
+
+ switch (amdgpu_ip_version(adev, ISP_HWIP, 0)) {
+ case IP_VERSION(4, 1, 0):
+ isp_v4_1_0_set_isp_funcs(isp);
+ break;
+ case IP_VERSION(4, 1, 1):
+ isp_v4_1_1_set_isp_funcs(isp);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ isp->adev = adev;
+ isp->parent = adev->dev;
+
+ if (isp_load_fw_by_psp(adev)) {
+ DRM_DEBUG_DRIVER("%s: isp fw load failed\n", __func__);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static bool isp_is_idle(struct amdgpu_ip_block *ip_block)
+{
+ return true;
+}
+
+static int isp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+static int is_valid_isp_device(struct device *isp_parent, struct device *amdgpu_dev)
+{
+ if (isp_parent != amdgpu_dev)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * isp_user_buffer_alloc - create user buffer object (BO) for isp
+ *
+ * @dev: isp device handle
+ * @dmabuf: DMABUF handle for isp buffer allocated in system memory
+ * @buf_obj: GPU buffer object handle to initialize
+ * @buf_addr: GPU addr of the pinned BO to initialize
+ *
+ * Imports the isp DMABUF and pins it as a user BO for isp internal use. A GART
+ * mapping is allocated so the BO gets a GPU address and is reachable by the
+ * ISP HW through the GART aperture.
+ *
+ * This function is exported to allow the V4L2 isp device, which sits outside
+ * the drm device, to create and access the isp user BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int isp_user_buffer_alloc(struct device *dev, void *dmabuf,
+ void **buf_obj, u64 *buf_addr)
+{
+ struct platform_device *ispdev = to_platform_device(dev);
+ const struct isp_platform_data *isp_pdata;
+ struct amdgpu_device *adev;
+ struct mfd_cell *mfd_cell;
+ struct amdgpu_bo *bo;
+ u64 gpu_addr;
+ int ret;
+
+ if (WARN_ON(!ispdev))
+ return -ENODEV;
+
+ if (WARN_ON(!buf_obj))
+ return -EINVAL;
+
+ if (WARN_ON(!buf_addr))
+ return -EINVAL;
+
+ mfd_cell = &ispdev->mfd_cell[0];
+ if (!mfd_cell)
+ return -ENODEV;
+
+ isp_pdata = mfd_cell->platform_data;
+ adev = isp_pdata->adev;
+
+ ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_create_isp_user(adev, dmabuf,
+ AMDGPU_GEM_DOMAIN_GTT, &bo, &gpu_addr);
+ if (ret) {
+ drm_err(&adev->ddev, "failed to alloc gart user buffer (%d)", ret);
+ return ret;
+ }
+
+ *buf_obj = (void *)bo;
+ *buf_addr = gpu_addr;
+
+ return 0;
+}
+EXPORT_SYMBOL(isp_user_buffer_alloc);
+
+/**
+ * isp_user_buffer_free - free isp user buffer object (BO)
+ *
+ * @buf_obj: amdgpu isp user BO to free
+ *
+ * Unpins and unrefs the BO previously pinned for isp internal use.
+ *
+ * This function is exported to allow the V4L2 isp device, which sits outside
+ * the drm device, to free the isp user BO.
+ */
+void isp_user_buffer_free(void *buf_obj)
+{
+ amdgpu_bo_free_isp_user(buf_obj);
+}
+EXPORT_SYMBOL(isp_user_buffer_free);
+
+/**
+ * isp_kernel_buffer_alloc - create kernel buffer object (BO) for isp
+ *
+ * @dev: isp device handle
+ * @size: size for the new BO
+ * @buf_obj: GPU BO handle to initialize
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: CPU address mapping of BO
+ *
+ * Allocates and pins a kernel BO for internal isp firmware use.
+ *
+ * This function is exported to allow the V4L2 isp device, which sits outside
+ * the drm device, to create and access the kernel BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int isp_kernel_buffer_alloc(struct device *dev, u64 size,
+ void **buf_obj, u64 *gpu_addr, void **cpu_addr)
+{
+ struct platform_device *ispdev = to_platform_device(dev);
+ struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
+ const struct isp_platform_data *isp_pdata;
+ struct amdgpu_device *adev;
+ struct mfd_cell *mfd_cell;
+ int ret;
+
+ if (WARN_ON(!ispdev))
+ return -ENODEV;
+
+ if (WARN_ON(!buf_obj))
+ return -EINVAL;
+
+ if (WARN_ON(!gpu_addr))
+ return -EINVAL;
+
+ if (WARN_ON(!cpu_addr))
+ return -EINVAL;
+
+ mfd_cell = &ispdev->mfd_cell[0];
+ if (!mfd_cell)
+ return -ENODEV;
+
+ isp_pdata = mfd_cell->platform_data;
+ adev = isp_pdata->adev;
+
+ ret = is_valid_isp_device(ispdev->dev.parent, adev->dev);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_create_kernel(adev,
+ size,
+ ISP_MC_ADDR_ALIGN,
+ AMDGPU_GEM_DOMAIN_GTT,
+ bo,
+ gpu_addr,
+ cpu_addr);
+ if (!cpu_addr || ret) {
+ drm_err(&adev->ddev, "failed to alloc gart kernel buffer (%d)", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(isp_kernel_buffer_alloc);
+
+/**
+ * isp_kernel_buffer_free - free isp kernel buffer object (BO)
+ *
+ * @buf_obj: amdgpu isp kernel BO to free
+ * @gpu_addr: GPU addr of isp kernel BO
+ * @cpu_addr: CPU addr of isp kernel BO
+ *
+ * Unmaps, unpins and frees an isp kernel BO.
+ *
+ * This function is exported to allow the V4L2 isp device, which sits outside
+ * the drm device, to free the kernel BO.
+ */
+void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr)
+{
+ struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj;
+
+ amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr);
+}
+EXPORT_SYMBOL(isp_kernel_buffer_free);
+
+static const struct amd_ip_funcs isp_ip_funcs = {
+ .name = "isp_ip",
+ .early_init = isp_early_init,
+ .hw_init = isp_hw_init,
+ .hw_fini = isp_hw_fini,
+ .is_idle = isp_is_idle,
+ .set_clockgating_state = isp_set_clockgating_state,
+ .set_powergating_state = isp_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version isp_v4_1_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_ISP,
+ .major = 4,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &isp_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version isp_v4_1_1_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_ISP,
+ .major = 4,
+ .minor = 1,
+ .rev = 1,
+ .funcs = &isp_ip_funcs,
+};
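
For illustration only (not part of this diff): a rough sketch of how the external V4L2 isp driver might drive the exported buffer helpers above. The device pointer is assumed to be the MFD child platform device created by this block; the buffer size and call ordering are assumptions.

#include <linux/sizes.h>

static int example_isp_map_buffers(struct device *isp_dev, void *dmabuf)
{
	void *user_bo, *fw_bo, *fw_cpu;
	u64 user_gpu_addr, fw_gpu_addr;
	int ret;

	/* pin the imported DMABUF so the ISP HW can reach it through GART */
	ret = isp_user_buffer_alloc(isp_dev, dmabuf, &user_bo, &user_gpu_addr);
	if (ret)
		return ret;

	/* back a firmware work area with a pinned, CPU-mapped kernel BO */
	ret = isp_kernel_buffer_alloc(isp_dev, SZ_1M, &fw_bo, &fw_gpu_addr, &fw_cpu);
	if (ret) {
		isp_user_buffer_free(user_bo);
		return ret;
	}

	/* ... program the ISP with user_gpu_addr / fw_gpu_addr here ... */

	isp_kernel_buffer_free(&fw_bo, &fw_gpu_addr, &fw_cpu);
	isp_user_buffer_free(user_bo);
	return 0;
}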
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
new file mode 100644
index 000000000000..d6f4ffa4c97c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2024 Advanced Micro Devices, Inc. All rights reserved.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#ifndef __AMDGPU_ISP_H__
+#define __AMDGPU_ISP_H__
+
+#include <drm/amd/isp.h>
+#include <linux/pm_domain.h>
+
+#define ISP_REGS_OFFSET_END 0x629A4
+
+struct amdgpu_isp;
+
+struct isp_funcs {
+ int (*hw_init)(struct amdgpu_isp *isp);
+ int (*hw_fini)(struct amdgpu_isp *isp);
+};
+
+struct amdgpu_isp {
+ struct device *parent;
+ struct amdgpu_device *adev;
+ const struct isp_funcs *funcs;
+ struct mfd_cell *isp_cell;
+ struct resource *isp_res;
+ struct resource *isp_i2c_res;
+ struct resource *isp_gpio_res;
+ struct isp_platform_data *isp_pdata;
+ unsigned int harvest_config;
+ const struct firmware *fw;
+ struct generic_pm_domain ispgpd;
+};
+
+extern const struct amdgpu_ip_block_version isp_v4_1_0_ip_block;
+extern const struct amdgpu_ip_block_version isp_v4_1_1_ip_block;
+
+#endif /* __AMDGPU_ISP_H__ */
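
For illustration only (not part of this diff): how a version-specific ISP file might fill in the isp_funcs table declared above; the callback bodies and names are placeholders.

static int example_isp_hw_init(struct amdgpu_isp *isp)
{
	/* power up and start the ISP core here */
	return 0;
}

static int example_isp_hw_fini(struct amdgpu_isp *isp)
{
	/* stop and power down the ISP core here */
	return 0;
}

static const struct isp_funcs example_isp_funcs = {
	.hw_init = example_isp_hw_init,
	.hw_fini = example_isp_hw_fini,
};

static void example_set_isp_funcs(struct amdgpu_isp *isp)
{
	isp->funcs = &example_isp_funcs;
}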
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 7570f2439a11..d020a890a0ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -24,169 +24,447 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_drv.h>
+
#include "amdgpu.h"
#include "amdgpu_trace.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_dev_coredump.h"
+#include "amdgpu_xgmi.h"
-static void amdgpu_job_timedout(struct amd_sched_job *s_job)
+static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
{
- struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+ int i;
+
+ dev_info(adev->dev, "Dumping IP State\n");
+ for (i = 0; i < adev->num_ip_blocks; i++)
+ if (adev->ip_blocks[i].version->funcs->dump_ip_state)
+ adev->ip_blocks[i].version->funcs
+ ->dump_ip_state((void *)&adev->ip_blocks[i]);
+ dev_info(adev->dev, "Dumping IP State Completed\n");
- DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
- job->base.sched->name,
- atomic_read(&job->ring->fence_drv.last_seq),
- job->ring->fence_drv.sync_seq);
- amdgpu_gpu_reset(job->adev);
+ amdgpu_coredump(adev, true, false, job);
}
-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
- struct amdgpu_job **job, struct amdgpu_vm *vm)
+static void amdgpu_job_core_dump(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
{
- size_t size = sizeof(struct amdgpu_job);
+ struct list_head device_list, *device_list_handle = NULL;
+ struct amdgpu_device *tmp_adev = NULL;
+ struct amdgpu_hive_info *hive = NULL;
+
+ if (!amdgpu_sriov_vf(adev))
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+ /*
+ * Reuse the logic in amdgpu_device_gpu_recover() to build the list of
+ * devices for core dump
+ */
+ INIT_LIST_HEAD(&device_list);
+ if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ if (!list_is_first(&adev->reset_list, &device_list))
+ list_rotate_to_front(&adev->reset_list, &device_list);
+ device_list_handle = &device_list;
+ } else {
+ list_add_tail(&adev->reset_list, &device_list);
+ device_list_handle = &device_list;
+ }
+
+ /* Do the coredump for each device */
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list)
+ amdgpu_job_do_core_dump(tmp_adev, job);
+
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+ }
+}
+
+static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
+{
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+ struct amdgpu_job *job = to_amdgpu_job(s_job);
+ struct drm_wedge_task_info *info = NULL;
+ struct amdgpu_task_info *ti = NULL;
+ struct amdgpu_device *adev = ring->adev;
+ int idx, r;
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
+ dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s",
+ __func__, s_job->sched->name);
+
+ /* Effectively the job is aborted as the device is gone */
+ return DRM_GPU_SCHED_STAT_ENODEV;
+ }
+
+ /*
+ * Do the coredump immediately after a job timeout to get a very
+ * close dump/snapshot/representation of the GPU's current error status.
+ * Skip it for SRIOV, since VF FLR will be triggered by the host driver
+ * before the job timeout.
+ */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_job_core_dump(adev, job);
+
+ if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_SOFT_RESET) &&
+ amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
+ dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
+ s_job->sched->name);
+ goto exit;
+ }
+
+ dev_err(adev->dev, "ring %s timeout, signaled seq=%u, emitted seq=%u\n",
+ job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+ ring->fence_drv.sync_seq);
+
+ ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
+ if (ti) {
+ amdgpu_vm_print_task_info(adev, ti);
+ info = &ti->task;
+ }
+
+ /* attempt a per ring reset */
+ if (unlikely(adev->debug_disable_gpu_ring_reset)) {
+ dev_err(adev->dev, "Ring reset disabled by debug mask\n");
+ } else if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
+ ring->funcs->reset) {
+ dev_err(adev->dev, "Starting %s ring reset\n",
+ s_job->sched->name);
+ r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence);
+ if (!r) {
+ atomic_inc(&ring->adev->gpu_reset_counter);
+ dev_err(adev->dev, "Ring %s reset succeeded\n",
+ ring->sched.name);
+ drm_dev_wedged_event(adev_to_drm(adev),
+ DRM_WEDGE_RECOVERY_NONE, info);
+ goto exit;
+ }
+ dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name);
+ }
+
+ dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
+
+ if (amdgpu_device_should_recover_gpu(ring->adev)) {
+ struct amdgpu_reset_context reset_context;
+ memset(&reset_context, 0, sizeof(reset_context));
+
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ reset_context.src = AMDGPU_RESET_SRC_JOB;
+ clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
+ /*
+ * Skip the coredump here: we already have a very close representation
+ * of the GPU's error status from the dump taken right after the timeout.
+ */
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+ r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
+ if (r)
+ dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
+ } else {
+ drm_sched_suspend_timeout(&ring->sched);
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.tdr_debug = true;
+ }
+
+exit:
+ amdgpu_vm_put_task_info(ti);
+ drm_dev_exit(idx);
+ return DRM_GPU_SCHED_STAT_RESET;
+}
+
+int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct drm_sched_entity *entity, void *owner,
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id)
+{
if (num_ibs == 0)
return -EINVAL;
- size += sizeof(struct amdgpu_ib) * num_ibs;
-
- *job = kzalloc(size, GFP_KERNEL);
+ *job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
if (!*job)
return -ENOMEM;
- (*job)->adev = adev;
(*job)->vm = vm;
- (*job)->ibs = (void *)&(*job)[1];
- (*job)->num_ibs = num_ibs;
- (*job)->need_pipeline_sync = false;
- amdgpu_sync_create(&(*job)->sync);
+ amdgpu_sync_create(&(*job)->explicit_sync);
+ (*job)->generation = amdgpu_vm_generation(adev, vm);
+ (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;
- return 0;
+ if (!entity)
+ return 0;
+
+ return drm_sched_job_init(&(*job)->base, entity, 1, owner,
+ drm_client_id);
}
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- struct amdgpu_job **job)
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
+ struct drm_sched_entity *entity, void *owner,
+ size_t size, enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_job **job, u64 k_job_id)
{
int r;
- r = amdgpu_job_alloc(adev, 1, job, NULL);
+ r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job,
+ k_job_id);
if (r)
return r;
- r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
- if (r)
+ (*job)->num_ibs = 1;
+ r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
+ if (r) {
+ if (entity)
+ drm_sched_job_cleanup(&(*job)->base);
kfree(*job);
+ }
return r;
}
+void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
+ struct amdgpu_bo *gws, struct amdgpu_bo *oa)
+{
+ if (gds) {
+ job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
+ job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
+ }
+ if (gws) {
+ job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
+ job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
+ }
+ if (oa) {
+ job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
+ job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
+ }
+}
+
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
struct dma_fence *f;
unsigned i;
- /* use sched fence if available */
- f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
+ /* Check if any fences were initialized */
+ if (job->base.s_fence && job->base.s_fence->finished.ops)
+ f = &job->base.s_fence->finished;
+ else if (job->hw_fence.base.ops)
+ f = &job->hw_fence.base;
+ else
+ f = NULL;
for (i = 0; i < job->num_ibs; ++i)
- amdgpu_ib_free(job->adev, &job->ibs[i], f);
+ amdgpu_ib_free(&job->ibs[i], f);
}
-static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
- struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+ struct amdgpu_job *job = to_amdgpu_job(s_job);
- dma_fence_put(job->fence);
- amdgpu_sync_free(&job->sync);
- kfree(job);
+ drm_sched_job_cleanup(s_job);
+
+ amdgpu_sync_free(&job->explicit_sync);
+
+ /* only put the hw fence if the job has an embedded fence */
+ if (!job->hw_fence.base.ops)
+ kfree(job);
+ else
+ dma_fence_put(&job->hw_fence.base);
+}
+
+void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+ struct amdgpu_job *leader)
+{
+ struct dma_fence *fence = &leader->base.s_fence->scheduled;
+
+ WARN_ON(job->gang_submit);
+
+ /*
+ * Don't add a reference when we are the gang leader to avoid a circular
+ * dependency.
+ */
+ if (job != leader)
+ dma_fence_get(fence);
+ job->gang_submit = fence;
}
void amdgpu_job_free(struct amdgpu_job *job)
{
+ if (job->base.entity)
+ drm_sched_job_cleanup(&job->base);
+
amdgpu_job_free_resources(job);
+ amdgpu_sync_free(&job->explicit_sync);
+ if (job->gang_submit != &job->base.s_fence->scheduled)
+ dma_fence_put(job->gang_submit);
+
+ if (!job->hw_fence.base.ops)
+ kfree(job);
+ else
+ dma_fence_put(&job->hw_fence.base);
+}
- dma_fence_put(job->fence);
- amdgpu_sync_free(&job->sync);
- kfree(job);
+struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
+{
+ struct dma_fence *f;
+
+ drm_sched_job_arm(&job->base);
+ f = dma_fence_get(&job->base.s_fence->finished);
+ amdgpu_job_free_resources(job);
+ drm_sched_entity_push_job(&job->base);
+
+ return f;
}
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct amd_sched_entity *entity, void *owner,
- struct dma_fence **f)
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+ struct dma_fence **fence)
{
int r;
- job->ring = ring;
- if (!f)
- return -EINVAL;
+ job->base.sched = &ring->sched;
+ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);
- r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
if (r)
return r;
- job->owner = owner;
- job->fence_ctx = entity->fence_context;
- *f = dma_fence_get(&job->base.s_fence->finished);
- amdgpu_job_free_resources(job);
- amd_sched_entity_push_job(&job->base);
-
+ amdgpu_job_free(job);
return 0;
}
-static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *
+amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
{
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
struct amdgpu_job *job = to_amdgpu_job(sched_job);
- struct amdgpu_vm *vm = job->vm;
-
- struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync);
+ struct dma_fence *fence;
+ int r;
- while (fence == NULL && vm && !job->vm_id) {
- struct amdgpu_ring *ring = job->ring;
- int r;
+ r = drm_sched_entity_error(s_entity);
+ if (r)
+ goto error;
- r = amdgpu_vm_grab_id(vm, ring, &job->sync,
- &job->base.s_fence->finished,
- job);
- if (r)
- DRM_ERROR("Error getting VM ID (%d)\n", r);
+ if (job->gang_submit) {
+ fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
+ if (fence)
+ return fence;
+ }
- fence = amdgpu_sync_get_fence(&job->sync);
+ fence = amdgpu_device_enforce_isolation(ring->adev, ring, job);
+ if (fence)
+ return fence;
+
+ if (job->vm && !job->vmid) {
+ r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
+ if (r) {
+ dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
+ goto error;
+ }
+ return fence;
}
- if (amd_sched_dependency_optimized(fence, sched_job->s_entity))
- job->need_pipeline_sync = true;
+ return NULL;
- return fence;
+error:
+ dma_fence_set_error(&job->base.s_fence->finished, r);
+ return NULL;
}
-static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
- struct dma_fence *fence = NULL;
+ struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
+ struct amdgpu_device *adev = ring->adev;
+ struct dma_fence *fence = NULL, *finished;
struct amdgpu_job *job;
- int r;
+ int r = 0;
- if (!sched_job) {
- DRM_ERROR("job is null\n");
- return NULL;
- }
job = to_amdgpu_job(sched_job);
-
- BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
+ finished = &job->base.s_fence->finished;
trace_amdgpu_sched_run_job(job);
- r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
- if (r)
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
- /* if gpu reset, hw fence will be replaced here */
- dma_fence_put(job->fence);
- job->fence = dma_fence_get(fence);
+ /* Skip job if VRAM is lost and never resubmit gangs */
+ if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
+ (job->job_run_counter && job->gang_submit))
+ dma_fence_set_error(finished, -ECANCELED);
+
+ if (finished->error < 0) {
+ dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
+ ring->name);
+ } else {
+ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
+ &fence);
+ if (r)
+ dev_err(adev->dev,
+ "Error scheduling IBs (%d) in ring(%s)", r,
+ ring->name);
+ }
+
+ job->job_run_counter++;
amdgpu_job_free_resources(job);
+
+ fence = r ? ERR_PTR(r) : fence;
return fence;
}
-const struct amd_sched_backend_ops amdgpu_sched_ops = {
- .dependency = amdgpu_job_dependency,
+/*
+ * This is a duplicate function from DRM scheduler sched_internal.h.
+ * The plan is to remove it once amdgpu_job_stop_all_jobs_on_sched is removed,
+ * as the latter is incorrect and racy.
+ *
+ * See https://lore.kernel.org/amd-gfx/44edde63-7181-44fb-a4f7-94e50514f539@amd.com/
+ */
+static struct drm_sched_job *
+drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
+{
+ struct spsc_node *node;
+
+ node = spsc_queue_pop(&entity->job_queue);
+ if (!node)
+ return NULL;
+
+ return container_of(node, struct drm_sched_job, queue_node);
+}
+
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *s_job;
+ struct drm_sched_entity *s_entity = NULL;
+ int i;
+
+ /* Signal all jobs not yet scheduled */
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
+ struct drm_sched_rq *rq = sched->sched_rq[i];
+ spin_lock(&rq->lock);
+ list_for_each_entry(s_entity, &rq->entities, list) {
+ while ((s_job = drm_sched_entity_queue_pop(s_entity))) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_signal(&s_fence->scheduled);
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+ }
+ spin_unlock(&rq->lock);
+ }
+
+ /* Signal all jobs already scheduled to HW */
+ list_for_each_entry(s_job, &sched->pending_list, list) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+}
+
+const struct drm_sched_backend_ops amdgpu_sched_ops = {
+ .prepare_job = amdgpu_job_prepare_job,
.run_job = amdgpu_job_run,
.timedout_job = amdgpu_job_timedout,
.free_job = amdgpu_job_free_cb
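
For illustration only (not part of this diff): how a submitter might use amdgpu_job_set_gang_leader() from above to tie a gang of jobs to one leader; the job array and count are placeholders.

static void example_link_gang(struct amdgpu_job **jobs, unsigned int count,
			      struct amdgpu_job *leader)
{
	unsigned int i;

	/* every member points at the leader's scheduled fence; only
	 * non-leaders take an extra reference (see the helper above) */
	for (i = 0; i < count; i++)
		amdgpu_job_set_gang_leader(jobs[i], leader);
}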
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
new file mode 100644
index 000000000000..4a6487eb6cb5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_JOB_H__
+#define __AMDGPU_JOB_H__
+
+#include <drm/gpu_scheduler.h>
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+
+/* bit set means command submit involves a preamble IB */
+#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0)
+/* bit set means preamble IB is first presented in belonging context */
+#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1)
+/* bit set means context switch occurred */
+#define AMDGPU_HAVE_CTX_SWITCH (1 << 2)
+/* bit set means IB is preempted */
+#define AMDGPU_IB_PREEMPTED (1 << 3)
+
+#define to_amdgpu_job(sched_job) \
+ container_of((sched_job), struct amdgpu_job, base)
+
+#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
+
+struct amdgpu_fence;
+enum amdgpu_ib_pool_type;
+
+/* Internal kernel job ids. (decreasing values, starting from U64_MAX). */
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE (18446744073709551615ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES (18446744073709551614ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE (18446744073709551613ULL)
+#define AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR (18446744073709551612ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER (18446744073709551611ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA (18446744073709551610ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER (18446744073709551609ULL)
+#define AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE (18446744073709551608ULL)
+#define AMDGPU_KERNEL_JOB_ID_MOVE_BLIT (18446744073709551607ULL)
+#define AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER (18446744073709551606ULL)
+#define AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER (18446744073709551605ULL)
+#define AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB (18446744073709551604ULL)
+#define AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP (18446744073709551603ULL)
+#define AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST (18446744073709551602ULL)
+
+struct amdgpu_job {
+ struct drm_sched_job base;
+ struct amdgpu_vm *vm;
+ struct amdgpu_sync explicit_sync;
+ struct amdgpu_fence hw_fence;
+ struct dma_fence *gang_submit;
+ uint32_t preamble_status;
+ uint32_t preemption_status;
+ bool vm_needs_flush;
+ bool gds_switch_needed;
+ bool spm_update_needed;
+ uint64_t vm_pd_addr;
+ unsigned vmid;
+ unsigned pasid;
+ uint32_t gds_base, gds_size;
+ uint32_t gws_base, gws_size;
+ uint32_t oa_base, oa_size;
+ uint64_t generation;
+
+ /* user fence handling */
+ uint64_t uf_addr;
+ uint64_t uf_sequence;
+
+ /* virtual addresses for shadow/GDS/CSA */
+ uint64_t shadow_va;
+ uint64_t csa_va;
+ uint64_t gds_va;
+ bool init_shadow;
+
+ /* job_run_counter >= 1 means a resubmit job */
+ uint32_t job_run_counter;
+
+ /* enforce isolation */
+ bool enforce_isolation;
+ bool run_cleaner_shader;
+
+ uint32_t num_ibs;
+ struct amdgpu_ib ibs[];
+};
+
+static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
+{
+ return to_amdgpu_ring(job->base.entity->rq->sched);
+}
+
+int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct drm_sched_entity *entity, void *owner,
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id);
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
+ struct drm_sched_entity *entity, void *owner,
+ size_t size, enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_job **job,
+ u64 k_job_id);
+void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
+ struct amdgpu_bo *gws, struct amdgpu_bo *oa);
+void amdgpu_job_free_resources(struct amdgpu_job *job);
+void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+ struct amdgpu_job *leader);
+void amdgpu_job_free(struct amdgpu_job *job);
+struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job);
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+ struct dma_fence **fence);
+
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched);
+
+#endif
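
For illustration only (not part of this diff): the typical scheduler-backed submission flow through the API declared above. The IB size, pool type (assumed to be a valid enum amdgpu_ib_pool_type value) and kernel job id are placeholders.

#include <linux/dma-fence.h>

static int example_submit_kernel_job(struct amdgpu_device *adev,
				     struct drm_sched_entity *entity)
{
	struct amdgpu_job *job;
	struct dma_fence *fence;
	int r;

	/* allocate a one-IB job bound to a scheduler entity */
	r = amdgpu_job_alloc_with_ib(adev, entity, NULL, 64 * 4,
				     AMDGPU_IB_POOL_DELAYED, &job,
				     AMDGPU_KERNEL_JOB_ID_VM_UPDATE);
	if (r)
		return r;

	/* ... fill job->ibs[0] with packets and set its length_dw ... */

	/* arm, push to the entity and take the finished fence */
	fence = amdgpu_job_submit(job);
	dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return 0;
}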
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
new file mode 100644
index 000000000000..6b7d66b6d4cc
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -0,0 +1,607 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_jpeg.h"
+#include "amdgpu_pm.h"
+#include "soc15d.h"
+#include "soc15_common.h"
+
+#define JPEG_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+static void amdgpu_jpeg_idle_work_handler(struct work_struct *work);
+static void amdgpu_jpeg_reg_dump_fini(struct amdgpu_device *adev);
+
+int amdgpu_jpeg_sw_init(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ INIT_DELAYED_WORK(&adev->jpeg.idle_work, amdgpu_jpeg_idle_work_handler);
+ mutex_init(&adev->jpeg.jpeg_pg_lock);
+ atomic_set(&adev->jpeg.total_submission_cnt, 0);
+
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG))
+ adev->jpeg.indirect_sram = true;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1U << i))
+ continue;
+
+ if (adev->jpeg.indirect_sram) {
+ r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->jpeg.inst[i].dpg_sram_bo,
+ &adev->jpeg.inst[i].dpg_sram_gpu_addr,
+ &adev->jpeg.inst[i].dpg_sram_cpu_addr);
+ if (r) {
+ dev_err(adev->dev,
+ "JPEG %d (%d) failed to allocate DPG bo\n", i, r);
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
+{
+ int i, j;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1U << i))
+ continue;
+
+ amdgpu_bo_free_kernel(
+ &adev->jpeg.inst[i].dpg_sram_bo,
+ &adev->jpeg.inst[i].dpg_sram_gpu_addr,
+ (void **)&adev->jpeg.inst[i].dpg_sram_cpu_addr);
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j)
+ amdgpu_ring_fini(&adev->jpeg.inst[i].ring_dec[j]);
+ }
+
+ if (adev->jpeg.reg_list)
+ amdgpu_jpeg_reg_dump_fini(adev);
+
+ mutex_destroy(&adev->jpeg.jpeg_pg_lock);
+
+ return 0;
+}
+
+int amdgpu_jpeg_suspend(struct amdgpu_device *adev)
+{
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
+
+ return 0;
+}
+
+int amdgpu_jpeg_resume(struct amdgpu_device *adev)
+{
+ return 0;
+}
+
+static void amdgpu_jpeg_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, jpeg.idle_work.work);
+ unsigned int fences = 0;
+ unsigned int i, j;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1U << i))
+ continue;
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j)
+ fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]);
+ }
+
+ if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) {
+ mutex_lock(&adev->jpeg.jpeg_pg_lock);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
+ AMD_PG_STATE_GATE);
+ mutex_unlock(&adev->jpeg.jpeg_pg_lock);
+ } else
+ schedule_delayed_work(&adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
+}
+
+void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ atomic_inc(&adev->jpeg.total_submission_cnt);
+ cancel_delayed_work_sync(&adev->jpeg.idle_work);
+
+ mutex_lock(&adev->jpeg.jpeg_pg_lock);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_JPEG,
+ AMD_PG_STATE_UNGATE);
+ mutex_unlock(&adev->jpeg.jpeg_pg_lock);
+}
+
+void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring)
+{
+ atomic_dec(&ring->adev->jpeg.total_submission_cnt);
+ schedule_delayed_work(&ring->adev->jpeg.idle_work, JPEG_IDLE_TIMEOUT);
+}
+
+int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ /* JPEG in SRIOV does not support direct register read/write */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ r = amdgpu_ring_alloc(ring, 3);
+ if (r)
+ return r;
+
+ WREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe], 0xCAFEDEAD);
+ /* Add a register read to make sure the preceding register write has been executed. */
+ RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]);
+
+ amdgpu_ring_write(ring, PACKET0(adev->jpeg.internal.jpeg_pitch[ring->pipe], 0));
+ amdgpu_ring_write(ring, 0xABADCAFE);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]);
+ if (tmp == 0xABADCAFE)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+ return r;
+}
+
+static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
+ struct dma_fence **fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ const unsigned ib_size_dw = 16;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+
+ ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch[ring->pipe], 0, 0, PACKETJ_TYPE0);
+ ib->ptr[1] = 0xDEADBEEF;
+ for (i = 2; i < 16; i += 2) {
+ ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
+ ib->ptr[i+1] = 0;
+ }
+ ib->length_dw = 16;
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned i;
+ struct dma_fence *fence = NULL;
+ long r = 0;
+
+ r = amdgpu_jpeg_dec_set_reg(ring, 1, &fence);
+ if (r)
+ goto error;
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ if (r == 0) {
+ r = -ETIMEDOUT;
+ goto error;
+ } else if (r < 0) {
+ goto error;
+ } else {
+ r = 0;
+ }
+
+ if (!amdgpu_sriov_vf(adev)) {
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(adev->jpeg.inst[ring->me].external.jpeg_pitch[ring->pipe]);
+ if (tmp == 0xDEADBEEF)
+ break;
+ udelay(1);
+ if (amdgpu_emu_mode == 1)
+ udelay(10);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ }
+
+ dma_fence_put(fence);
+error:
+ return r;
+}
+
+int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->jpeg.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+
+ return 0;
+}
+
+int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r, i;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ if (adev->jpeg.harvest_config & (1 << i) ||
+ !adev->jpeg.inst[i].ras_poison_irq.funcs)
+ continue;
+
+ r = amdgpu_irq_get(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+ }
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+ return r;
+}
+
+int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_jpeg_ras *ras;
+
+ if (!adev->jpeg.ras)
+ return 0;
+
+ ras = adev->jpeg.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register jpeg ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "jpeg");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__JPEG;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
+ adev->jpeg.ras_if = &ras->ras_block.ras_comm;
+
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_jpeg_ras_late_init;
+
+ return 0;
+}
+
+int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
+ enum AMDGPU_UCODE_ID ucode_id)
+{
+ struct amdgpu_firmware_info ucode = {
+ .ucode_id = AMDGPU_UCODE_ID_JPEG_RAM,
+ .mc_addr = adev->jpeg.inst[inst_idx].dpg_sram_gpu_addr,
+ .ucode_size = ((uintptr_t)adev->jpeg.inst[inst_idx].dpg_sram_curr_addr -
+ (uintptr_t)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr),
+ };
+
+ return psp_execute_ip_fw_load(&adev->psp, &ucode);
+}
+
+/*
+ * debugfs to enable/disable jpeg job submission to a specific core.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_jpeg_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i, j;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
+ mask = (1ULL << (adev->jpeg.num_jpeg_inst * adev->jpeg.num_jpeg_rings)) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ if (val & BIT_ULL((i * adev->jpeg.num_jpeg_rings) + j))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+ }
+ /* make the sched.ready flag updates visible across SMP immediately */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_jpeg_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i, j;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; ++j) {
+ ring = &adev->jpeg.inst[i].ring_dec[j];
+ if (ring->sched.ready)
+ mask |= 1ULL << ((i * adev->jpeg.num_jpeg_rings) + j);
+ }
+ }
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_jpeg_sched_mask_fops,
+ amdgpu_debugfs_jpeg_sched_mask_get,
+ amdgpu_debugfs_jpeg_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ if (adev->jpeg.num_jpeg_inst <= 1 && adev->jpeg.num_jpeg_rings <= 1)
+ return;
+ sprintf(name, "amdgpu_jpeg_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_jpeg_sched_mask_fops);
+#endif
+}
+
+static ssize_t amdgpu_get_jpeg_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->jpeg.supported_reset);
+}
+
+static DEVICE_ATTR(jpeg_reset_mask, 0444,
+ amdgpu_get_jpeg_reset_mask, NULL);
+
+int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->jpeg.num_jpeg_inst) {
+ r = device_create_file(adev->dev, &dev_attr_jpeg_reset_mask);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+ if (adev->dev->kobj.sd) {
+ if (adev->jpeg.num_jpeg_inst)
+ device_remove_file(adev->dev, &dev_attr_jpeg_reset_mask);
+ }
+}
+
+int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count)
+{
+ adev->jpeg.ip_dump = kcalloc(adev->jpeg.num_jpeg_inst * count,
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!adev->jpeg.ip_dump) {
+ dev_err(adev->dev,
+ "Failed to allocate memory for JPEG IP Dump\n");
+ return -ENOMEM;
+ }
+ adev->jpeg.reg_list = reg;
+ adev->jpeg.reg_count = count;
+
+ return 0;
+}
+
+static void amdgpu_jpeg_reg_dump_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->jpeg.ip_dump);
+ adev->jpeg.reg_list = NULL;
+ adev->jpeg.reg_count = 0;
+}
+
+void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ u32 inst_off, inst_id, is_powered;
+ int i, j;
+
+ if (!adev->jpeg.ip_dump)
+ return;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ inst_id = GET_INST(JPEG, i);
+ inst_off = i * adev->jpeg.reg_count;
+ /* check power status from UVD_JPEG_POWER_STATUS */
+ adev->jpeg.ip_dump[inst_off] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->jpeg.reg_list[0],
+ inst_id));
+ is_powered = ((adev->jpeg.ip_dump[inst_off] & 0x1) != 1);
+
+ if (is_powered)
+ for (j = 1; j < adev->jpeg.reg_count; j++)
+ adev->jpeg.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->jpeg.reg_list[j],
+ inst_id));
+ }
+}
+
+void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ u32 inst_off, is_powered;
+ int i, j;
+
+ if (!adev->jpeg.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->jpeg.num_jpeg_inst);
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:JPEG%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * adev->jpeg.reg_count;
+ is_powered = ((adev->jpeg.ip_dump[inst_off] & 0x1) != 1);
+
+ if (is_powered) {
+ drm_printf(p, "Active Instance:JPEG%d\n", i);
+ for (j = 0; j < adev->jpeg.reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", adev->jpeg.reg_list[j].reg_name,
+ adev->jpeg.ip_dump[inst_off + j]);
+ } else
+ drm_printf(p, "\nInactive Instance:JPEG%d\n", i);
+ }
+}
+
+static inline bool amdgpu_jpeg_reg_valid(u32 reg)
+{
+ if (reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END ||
+ (reg >= JPEG_ATOMIC_RANGE_START && reg <= JPEG_ATOMIC_RANGE_END))
+ return false;
+ else
+ return true;
+}
+
+/**
+ * amdgpu_jpeg_dec_parse_cs - command submission parser
+ *
+ * @parser: Command submission parser context
+ * @job: the job to parse
+ * @ib: the IB to parse
+ *
+ * Parse the command stream, return -EINVAL for invalid packet,
+ * 0 otherwise
+ */
+int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
+{
+ u32 i, reg, res, cond, type;
+ struct amdgpu_device *adev = parser->adev;
+
+ for (i = 0; i < ib->length_dw ; i += 2) {
+ reg = CP_PACKETJ_GET_REG(ib->ptr[i]);
+ res = CP_PACKETJ_GET_RES(ib->ptr[i]);
+ cond = CP_PACKETJ_GET_COND(ib->ptr[i]);
+ type = CP_PACKETJ_GET_TYPE(ib->ptr[i]);
+
+ if (res) /* only support 0 at the moment */
+ return -EINVAL;
+
+ switch (type) {
+ case PACKETJ_TYPE0:
+ if (cond != PACKETJ_CONDITION_CHECK0 ||
+ !amdgpu_jpeg_reg_valid(reg)) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE3:
+ if (cond != PACKETJ_CONDITION_CHECK3 ||
+ !amdgpu_jpeg_reg_valid(reg)) {
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ }
+ break;
+ case PACKETJ_TYPE6:
+ if (ib->ptr[i] == CP_PACKETJ_NOP)
+ continue;
+ dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]);
+ return -EINVAL;
+ default:
+ dev_err(adev->dev, "Unknown packet type %d !\n", type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
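
For illustration only (not part of this diff): the bit layout used by the JPEG debugfs scheduler mask above, one bit per (instance, ring) pair; the helper name is hypothetical. With two instances of two rings each, writing 0x5 keeps only ring 0 of each instance schedulable.

#include <linux/bits.h>

static inline u64 example_jpeg_sched_mask_bit(struct amdgpu_device *adev,
					      u32 i, u32 j)
{
	/* same encoding as amdgpu_debugfs_jpeg_sched_mask_get()/_set() */
	return BIT_ULL(i * adev->jpeg.num_jpeg_rings + j);
}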
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
new file mode 100644
index 000000000000..346ae0ab09d3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_JPEG_H__
+#define __AMDGPU_JPEG_H__
+
+#include "amdgpu_ras.h"
+#include "amdgpu_cs.h"
+
+#define AMDGPU_MAX_JPEG_INSTANCES 4
+#define AMDGPU_MAX_JPEG_RINGS 10
+#define AMDGPU_MAX_JPEG_RINGS_4_0_3 8
+
+#define JPEG_REG_RANGE_START 0x4000
+#define JPEG_REG_RANGE_END 0x41c2
+#define JPEG_ATOMIC_RANGE_START 0x4120
+#define JPEG_ATOMIC_RANGE_END 0x412A
+
+
+#define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0)
+#define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1)
+
+#define WREG32_SOC15_JPEG_DPG_MODE(inst_idx, offset, value, indirect) \
+ do { \
+ if (!indirect) { \
+ WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \
+ mmUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15( \
+ JPEG, GET_INST(JPEG, inst_idx), \
+ mmUVD_DPG_LMA_CTL, \
+ (UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT | \
+ indirect << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
+ } else { \
+ *adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = \
+ offset; \
+ *adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = \
+ value; \
+ } \
+ } while (0)
+
+#define RREG32_SOC15_JPEG_DPG_MODE(inst_idx, offset, mask_en) \
+ ({ \
+ WREG32_SOC15(JPEG, inst_idx, mmUVD_DPG_LMA_CTL, \
+ (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
+ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ RREG32_SOC15(JPEG, inst_idx, mmUVD_DPG_LMA_DATA); \
+ })
+
+#define WREG32_SOC24_JPEG_DPG_MODE(inst_idx, offset, value, indirect) \
+ do { \
+ WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \
+ regUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \
+ regUVD_DPG_LMA_MASK, 0xFFFFFFFF); \
+ WREG32_SOC15( \
+ JPEG, GET_INST(JPEG, inst_idx), \
+ regUVD_DPG_LMA_CTL, \
+ (UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT | \
+ indirect << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
+ } while (0)
+
+#define RREG32_SOC24_JPEG_DPG_MODE(inst_idx, offset, mask_en) \
+ do { \
+ WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \
+ regUVD_DPG_LMA_MASK, 0xFFFFFFFF); \
+ WREG32_SOC15(JPEG, GET_INST(JPEG, inst_idx), \
+ regUVD_DPG_LMA_CTL, \
+ (UVD_DPG_LMA_CTL__MASK_EN_MASK | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ RREG32_SOC15(JPEG, inst_idx, regUVD_DPG_LMA_DATA); \
+ } while (0)
+
+#define ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, offset, value, indirect) \
+ do { \
+ *adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = offset; \
+ *adev->jpeg.inst[inst_idx].dpg_sram_curr_addr++ = value; \
+ } while (0)
+
+struct amdgpu_hwip_reg_entry;
+
+enum amdgpu_jpeg_caps {
+ AMDGPU_JPEG_RRMT_ENABLED,
+};
+
+#define AMDGPU_JPEG_CAPS(caps) BIT(AMDGPU_JPEG_##caps)
+
+struct amdgpu_jpeg_reg {
+ unsigned jpeg_pitch[AMDGPU_MAX_JPEG_RINGS];
+};
+
+struct amdgpu_jpeg_inst {
+ struct amdgpu_ring ring_dec[AMDGPU_MAX_JPEG_RINGS];
+ struct amdgpu_irq_src irq;
+ struct amdgpu_irq_src ras_poison_irq;
+ struct amdgpu_jpeg_reg external;
+ struct amdgpu_bo *dpg_sram_bo;
+ struct dpg_pause_state pause_state;
+ void *dpg_sram_cpu_addr;
+ uint64_t dpg_sram_gpu_addr;
+ uint32_t *dpg_sram_curr_addr;
+ uint8_t aid_id;
+};
+
+struct amdgpu_jpeg_ras {
+ struct amdgpu_ras_block_object ras_block;
+};
+
+struct amdgpu_jpeg {
+ uint8_t num_jpeg_inst;
+ struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
+ unsigned num_jpeg_rings;
+ struct amdgpu_jpeg_reg internal;
+ unsigned harvest_config;
+ struct delayed_work idle_work;
+ enum amd_powergating_state cur_state;
+ struct mutex jpeg_pg_lock;
+ atomic_t total_submission_cnt;
+ struct ras_common_if *ras_if;
+ struct amdgpu_jpeg_ras *ras;
+
+ uint16_t inst_mask;
+ uint8_t num_inst_per_aid;
+ bool indirect_sram;
+ uint32_t supported_reset;
+ uint32_t caps;
+ u32 *ip_dump;
+ u32 reg_count;
+ const struct amdgpu_hwip_reg_entry *reg_list;
+};
+
+int amdgpu_jpeg_sw_init(struct amdgpu_device *adev);
+int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev);
+int amdgpu_jpeg_suspend(struct amdgpu_device *adev);
+int amdgpu_jpeg_resume(struct amdgpu_device *adev);
+
+void amdgpu_jpeg_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_jpeg_ring_end_use(struct amdgpu_ring *ring);
+
+int amdgpu_jpeg_dec_ring_test_ring(struct amdgpu_ring *ring);
+int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+
+int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block);
+int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_jpeg_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
+ enum AMDGPU_UCODE_ID ucode_id);
+void amdgpu_debugfs_jpeg_sched_mask_init(struct amdgpu_device *adev);
+int amdgpu_jpeg_sysfs_reset_mask_init(struct amdgpu_device *adev);
+void amdgpu_jpeg_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count);
+void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block);
+void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
+
+#endif /*__AMDGPU_JPEG_H__*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 96c341670782..a9327472c651 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -25,16 +25,51 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
+#include "atom.h"
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
+#include "amdgpu_gem.h"
+#include "amdgpu_display.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
+#include "amd_pcie.h"
+#include "amdgpu_userq.h"
+
+void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
+{
+ struct amdgpu_gpu_instance *gpu_instance;
+ int i;
+
+ mutex_lock(&mgpu_info.mutex);
+
+ for (i = 0; i < mgpu_info.num_gpu; i++) {
+ gpu_instance = &(mgpu_info.gpu_ins[i]);
+ if (gpu_instance->adev == adev) {
+ mgpu_info.gpu_ins[i] =
+ mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
+ mgpu_info.num_gpu--;
+ if (adev->flags & AMD_IS_APU)
+ mgpu_info.num_apu--;
+ else
+ mgpu_info.num_dgpu--;
+ break;
+ }
+ }
+
+ mutex_unlock(&mgpu_info.mutex);
+}
/**
* amdgpu_driver_unload_kms - Main unload function for KMS.
@@ -46,60 +81,63 @@
*/
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (adev == NULL)
return;
+ amdgpu_unregister_gpu_instance(adev);
+
if (adev->rmmio == NULL)
- goto done_free;
+ return;
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_request_full_gpu(adev, false);
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_UNLOAD))
+ DRM_WARN("smart shift update failed\n");
- if (amdgpu_device_is_px(dev)) {
- pm_runtime_get_sync(dev->dev);
- pm_runtime_forbid(dev->dev);
- }
+ amdgpu_acpi_fini(adev);
+ amdgpu_device_fini_hw(adev);
+}
+
+void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
+{
+ struct amdgpu_gpu_instance *gpu_instance;
- amdgpu_amdkfd_device_fini(adev);
+ mutex_lock(&mgpu_info.mutex);
- amdgpu_acpi_fini(adev);
+ if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
+ DRM_ERROR("Cannot register more gpu instance\n");
+ mutex_unlock(&mgpu_info.mutex);
+ return;
+ }
- amdgpu_device_fini(adev);
+ gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
+ gpu_instance->adev = adev;
+ gpu_instance->mgpu_fan_enabled = 0;
-done_free:
- kfree(adev);
- dev->dev_private = NULL;
+ mgpu_info.num_gpu++;
+ if (adev->flags & AMD_IS_APU)
+ mgpu_info.num_apu++;
+ else
+ mgpu_info.num_dgpu++;
+
+ mutex_unlock(&mgpu_info.mutex);
}
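/*
 * Editor's note (illustrative only): the two helpers above are meant to be
 * used as a pair.  amdgpu_driver_unload_kms() above already calls
 * amdgpu_unregister_gpu_instance(); the matching registration is expected
 * to happen once on the load/probe side after amdgpu_device_init()
 * succeeds.  The function below is a hypothetical sketch of that pairing,
 * not code from the patch.
 */
static int example_load(struct amdgpu_device *adev, unsigned long flags)
{
        int r = amdgpu_driver_load_kms(adev, flags);

        if (r)
                return r;
        /* make the fully initialized device visible to mgpu bookkeeping */
        amdgpu_register_gpu_instance(adev);
        return 0;
}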
/**
* amdgpu_driver_load_kms - Main load function for KMS.
*
- * @dev: drm dev pointer
+ * @adev: pointer to struct amdgpu_device
* @flags: device flags
*
* This is the main load function for KMS (all asics).
* Returns 0 on success, error on failure.
*/
-int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
+int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
- struct amdgpu_device *adev;
+ struct drm_device *dev;
int r, acpi_status;
- adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
- if (adev == NULL) {
- return -ENOMEM;
- }
- dev->dev_private = (void *)adev;
-
- if ((amdgpu_runtime_pm != 0) &&
- amdgpu_has_atpx() &&
- (amdgpu_is_atpx_hybrid() ||
- amdgpu_has_atpx_dgpu_power_cntl()) &&
- ((flags & AMD_IS_APU) == 0) &&
- !pci_is_thunderbolt_attached(dev->pdev))
- flags |= AMD_IS_PX;
+ dev = adev_to_drm(adev);
/* amdgpu_device_init should report only fatal error
* like memory allocation failure or iomapping failure,
@@ -107,49 +145,70 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
* properly initialize the GPU MC controller and permit
* VRAM allocation
*/
- r = amdgpu_device_init(adev, dev, dev->pdev, flags);
+ r = amdgpu_device_init(adev, flags);
if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+ dev_err(dev->dev, "Fatal error during GPU init\n");
goto out;
}
+ amdgpu_device_detect_runtime_pm_mode(adev);
+
/* Call ACPI methods: require modeset init
* but failure is not fatal
*/
- if (!r) {
- acpi_status = amdgpu_acpi_init(adev);
- if (acpi_status)
- dev_dbg(&dev->pdev->dev,
- "Error during ACPI methods call\n");
- }
- amdgpu_amdkfd_load_interface(adev);
- amdgpu_amdkfd_device_probe(adev);
- amdgpu_amdkfd_device_init(adev);
-
- if (amdgpu_device_is_px(dev)) {
- pm_runtime_use_autosuspend(dev->dev);
- pm_runtime_set_autosuspend_delay(dev->dev, 5000);
- pm_runtime_set_active(dev->dev);
- pm_runtime_allow(dev->dev);
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
- }
+ acpi_status = amdgpu_acpi_init(adev);
+ if (acpi_status)
+ dev_dbg(dev->dev, "Error during ACPI methods call\n");
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_release_full_gpu(adev, true);
+ if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_LOAD))
+ DRM_WARN("smart shift update failed\n");
out:
- if (r) {
- /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
- if (adev->rmmio && amdgpu_device_is_px(dev))
- pm_runtime_put_noidle(dev->dev);
+ if (r)
amdgpu_driver_unload_kms(dev);
- }
return r;
}
+static enum amd_ip_block_type
+ amdgpu_ip_get_block_type(struct amdgpu_device *adev, uint32_t ip)
+{
+ enum amd_ip_block_type type;
+
+ switch (ip) {
+ case AMDGPU_HW_IP_GFX:
+ type = AMD_IP_BLOCK_TYPE_GFX;
+ break;
+ case AMDGPU_HW_IP_COMPUTE:
+ type = AMD_IP_BLOCK_TYPE_GFX;
+ break;
+ case AMDGPU_HW_IP_DMA:
+ type = AMD_IP_BLOCK_TYPE_SDMA;
+ break;
+ case AMDGPU_HW_IP_UVD:
+ case AMDGPU_HW_IP_UVD_ENC:
+ type = AMD_IP_BLOCK_TYPE_UVD;
+ break;
+ case AMDGPU_HW_IP_VCE:
+ type = AMD_IP_BLOCK_TYPE_VCE;
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ case AMDGPU_HW_IP_VCN_ENC:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+ break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
+ AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
+ break;
+ default:
+ type = AMD_IP_BLOCK_TYPE_NUM;
+ break;
+ }
+
+ return type;
+}
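/*
 * Editor's note: a minimal usage sketch of the translation helper above.
 * This is exactly the lookup the AMDGPU_INFO_HW_IP_COUNT case below
 * performs: map the UAPI ring type to an IP block type, resolve the block,
 * and check that it is valid.  The wrapper name is hypothetical.
 */
static bool example_hw_ip_is_valid(struct amdgpu_device *adev, uint32_t ip)
{
        enum amd_ip_block_type type = amdgpu_ip_get_block_type(adev, ip);
        struct amdgpu_ip_block *ip_block =
                amdgpu_device_ip_get_ip_block(adev, type);

        return ip_block && ip_block->status.valid;
}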
+
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
struct drm_amdgpu_query_fw *query_fw,
struct amdgpu_device *adev)
@@ -163,8 +222,12 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->uvd.fw_version;
fw_info->feature = 0;
break;
+ case AMDGPU_INFO_FW_VCN:
+ fw_info->ver = adev->vcn.fw_version;
+ fw_info->feature = 0;
+ break;
case AMDGPU_INFO_FW_GMC:
- fw_info->ver = adev->mc.fw_version;
+ fw_info->ver = adev->gmc.fw_version;
fw_info->feature = 0;
break;
case AMDGPU_INFO_FW_GFX_ME:
@@ -183,6 +246,26 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->gfx.rlc_fw_version;
fw_info->feature = adev->gfx.rlc_feature_version;
break;
+ case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
+ fw_info->ver = adev->gfx.rlc_srlc_fw_version;
+ fw_info->feature = adev->gfx.rlc_srlc_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
+ fw_info->ver = adev->gfx.rlc_srlg_fw_version;
+ fw_info->feature = adev->gfx.rlc_srlg_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
+ fw_info->ver = adev->gfx.rlc_srls_fw_version;
+ fw_info->feature = adev->gfx.rlc_srls_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_RLCP:
+ fw_info->ver = adev->gfx.rlcp_ucode_version;
+ fw_info->feature = adev->gfx.rlcp_ucode_feature_version;
+ break;
+ case AMDGPU_INFO_FW_GFX_RLCV:
+ fw_info->ver = adev->gfx.rlcv_ucode_version;
+ fw_info->feature = adev->gfx.rlcv_ucode_feature_version;
+ break;
case AMDGPU_INFO_FW_GFX_MEC:
if (query_fw->index == 0) {
fw_info->ver = adev->gfx.mec_fw_version;
@@ -197,6 +280,43 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->pm.fw_version;
fw_info->feature = 0;
break;
+ case AMDGPU_INFO_FW_TA:
+ switch (query_fw->index) {
+ case TA_FW_TYPE_PSP_XGMI:
+ fw_info->ver = adev->psp.xgmi_context.context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.xgmi_context.context
+ .bin_desc.feature_version;
+ break;
+ case TA_FW_TYPE_PSP_RAS:
+ fw_info->ver = adev->psp.ras_context.context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.ras_context.context
+ .bin_desc.feature_version;
+ break;
+ case TA_FW_TYPE_PSP_HDCP:
+ fw_info->ver = adev->psp.hdcp_context.context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.hdcp_context.context
+ .bin_desc.feature_version;
+ break;
+ case TA_FW_TYPE_PSP_DTM:
+ fw_info->ver = adev->psp.dtm_context.context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.dtm_context.context
+ .bin_desc.feature_version;
+ break;
+ case TA_FW_TYPE_PSP_RAP:
+ fw_info->ver = adev->psp.rap_context.context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.rap_context.context
+ .bin_desc.feature_version;
+ break;
+ case TA_FW_TYPE_PSP_SECUREDISPLAY:
+ fw_info->ver = adev->psp.securedisplay_context.context.bin_desc.fw_version;
+ fw_info->feature =
+ adev->psp.securedisplay_context.context.bin_desc
+ .feature_version;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
case AMDGPU_INFO_FW_SDMA:
if (query_fw->index >= adev->sdma.num_instances)
return -EINVAL;
@@ -204,16 +324,274 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
break;
case AMDGPU_INFO_FW_SOS:
- fw_info->ver = adev->psp.sos_fw_version;
- fw_info->feature = adev->psp.sos_feature_version;
+ fw_info->ver = adev->psp.sos.fw_version;
+ fw_info->feature = adev->psp.sos.feature_version;
break;
case AMDGPU_INFO_FW_ASD:
- fw_info->ver = adev->psp.asd_fw_version;
- fw_info->feature = adev->psp.asd_feature_version;
+ fw_info->ver = adev->psp.asd_context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.asd_context.bin_desc.feature_version;
+ break;
+ case AMDGPU_INFO_FW_DMCU:
+ fw_info->ver = adev->dm.dmcu_fw_version;
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_DMCUB:
+ fw_info->ver = adev->dm.dmcub_fw_version;
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_TOC:
+ fw_info->ver = adev->psp.toc.fw_version;
+ fw_info->feature = adev->psp.toc.feature_version;
+ break;
+ case AMDGPU_INFO_FW_CAP:
+ fw_info->ver = adev->psp.cap_fw_version;
+ fw_info->feature = adev->psp.cap_feature_version;
+ break;
+ case AMDGPU_INFO_FW_MES_KIQ:
+ fw_info->ver = adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK;
+ fw_info->feature = (adev->mes.kiq_version & AMDGPU_MES_FEAT_VERSION_MASK)
+ >> AMDGPU_MES_FEAT_VERSION_SHIFT;
+ break;
+ case AMDGPU_INFO_FW_MES:
+ fw_info->ver = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
+ fw_info->feature = (adev->mes.sched_version & AMDGPU_MES_FEAT_VERSION_MASK)
+ >> AMDGPU_MES_FEAT_VERSION_SHIFT;
+ break;
+ case AMDGPU_INFO_FW_IMU:
+ fw_info->ver = adev->gfx.imu_fw_version;
+ fw_info->feature = 0;
+ break;
+ case AMDGPU_INFO_FW_VPE:
+ fw_info->ver = adev->vpe.fw_version;
+ fw_info->feature = adev->vpe.feature_version;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
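/*
 * Editor's note: a hedged sketch of how amdgpu_firmware_info() is driven.
 * The debugfs firmware dump later in this patch issues exactly this kind
 * of query for each firmware type; the snippet below only illustrates the
 * calling convention and is not part of the patch.
 */
static int example_query_mec_fw(struct amdgpu_device *adev,
                                struct drm_amdgpu_info_firmware *fw_info)
{
        struct drm_amdgpu_query_fw query_fw = {
                .fw_type = AMDGPU_INFO_FW_GFX_MEC,
                .index = 0,     /* MEC1; index 1 selects MEC2 when present */
        };

        /* fills fw_info->ver and fw_info->feature on success */
        return amdgpu_firmware_info(fw_info, &query_fw, adev);
}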
+
+static int amdgpu_userq_metadata_info_gfx(struct amdgpu_device *adev,
+ struct drm_amdgpu_info *info,
+ struct drm_amdgpu_info_uq_metadata_gfx *meta)
+{
+ int ret = -EOPNOTSUPP;
+
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ struct amdgpu_gfx_shadow_info shadow = {};
+
+ adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow, true);
+ meta->shadow_size = shadow.shadow_size;
+ meta->shadow_alignment = shadow.shadow_alignment;
+ meta->csa_size = shadow.csa_size;
+ meta->csa_alignment = shadow.csa_alignment;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
+ struct drm_amdgpu_info *info,
+ struct drm_amdgpu_info_hw_ip *result)
+{
+ uint32_t ib_start_alignment = 0;
+ uint32_t ib_size_alignment = 0;
+ enum amd_ip_block_type type;
+ unsigned int num_rings = 0;
+ uint32_t num_slots = 0;
+ unsigned int i, j;
+
+ if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
+ return -EINVAL;
+
+ switch (info->query_hw_ip.type) {
+ case AMDGPU_HW_IP_GFX:
+ type = AMD_IP_BLOCK_TYPE_GFX;
+ for (i = 0; i < adev->gfx.num_gfx_rings; i++)
+ if (adev->gfx.gfx_ring[i].sched.ready &&
+ !adev->gfx.gfx_ring[i].no_user_submission)
+ ++num_rings;
+
+ if (!adev->gfx.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++)
+ num_slots += hweight32(adev->mes.gfx_hqd_mask[i]);
+ }
+
+ ib_start_alignment = 32;
+ ib_size_alignment = 32;
+ break;
+ case AMDGPU_HW_IP_COMPUTE:
+ type = AMD_IP_BLOCK_TYPE_GFX;
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ if (adev->gfx.compute_ring[i].sched.ready &&
+ !adev->gfx.compute_ring[i].no_user_submission)
+ ++num_rings;
+
+ if (!adev->sdma.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++)
+ num_slots += hweight32(adev->mes.compute_hqd_mask[i]);
+ }
+
+ ib_start_alignment = 32;
+ ib_size_alignment = 32;
+ break;
+ case AMDGPU_HW_IP_DMA:
+ type = AMD_IP_BLOCK_TYPE_SDMA;
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ if (adev->sdma.instance[i].ring.sched.ready &&
+ !adev->sdma.instance[i].ring.no_user_submission)
+ ++num_rings;
+
+ if (!adev->gfx.disable_uq) {
+ for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++)
+ num_slots += hweight32(adev->mes.sdma_hqd_mask[i]);
+ }
+
+ ib_start_alignment = 256;
+ ib_size_alignment = 4;
+ break;
+ case AMDGPU_HW_IP_UVD:
+ type = AMD_IP_BLOCK_TYPE_UVD;
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
+
+ if (adev->uvd.inst[i].ring.sched.ready &&
+ !adev->uvd.inst[i].ring.no_user_submission)
+ ++num_rings;
+ }
+ ib_start_alignment = 256;
+ ib_size_alignment = 64;
+ break;
+ case AMDGPU_HW_IP_VCE:
+ type = AMD_IP_BLOCK_TYPE_VCE;
+ for (i = 0; i < adev->vce.num_rings; i++)
+ if (adev->vce.ring[i].sched.ready &&
+ !adev->vce.ring[i].no_user_submission)
+ ++num_rings;
+ ib_start_alignment = 256;
+ ib_size_alignment = 4;
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
+ type = AMD_IP_BLOCK_TYPE_UVD;
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
+
+ for (j = 0; j < adev->uvd.num_enc_rings; j++)
+ if (adev->uvd.inst[i].ring_enc[j].sched.ready &&
+ !adev->uvd.inst[i].ring_enc[j].no_user_submission)
+ ++num_rings;
+ }
+ ib_start_alignment = 256;
+ ib_size_alignment = 4;
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ if (adev->vcn.inst[i].ring_dec.sched.ready &&
+ !adev->vcn.inst[i].ring_dec.no_user_submission)
+ ++num_rings;
+ }
+ ib_start_alignment = 256;
+ ib_size_alignment = 64;
+ break;
+ case AMDGPU_HW_IP_VCN_ENC:
+ type = AMD_IP_BLOCK_TYPE_VCN;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; j++)
+ if (adev->vcn.inst[i].ring_enc[j].sched.ready &&
+ !adev->vcn.inst[i].ring_enc[j].no_user_submission)
+ ++num_rings;
+ }
+ ib_start_alignment = 256;
+ ib_size_alignment = 4;
+ break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ type = (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_JPEG)) ?
+ AMD_IP_BLOCK_TYPE_JPEG : AMD_IP_BLOCK_TYPE_VCN;
+
+ for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
+ if (adev->jpeg.harvest_config & (1 << i))
+ continue;
+
+ for (j = 0; j < adev->jpeg.num_jpeg_rings; j++)
+ if (adev->jpeg.inst[i].ring_dec[j].sched.ready &&
+ !adev->jpeg.inst[i].ring_dec[j].no_user_submission)
+ ++num_rings;
+ }
+ ib_start_alignment = 256;
+ ib_size_alignment = 64;
+ break;
+ case AMDGPU_HW_IP_VPE:
+ type = AMD_IP_BLOCK_TYPE_VPE;
+ if (adev->vpe.ring.sched.ready &&
+ !adev->vpe.ring.no_user_submission)
+ ++num_rings;
+ ib_start_alignment = 256;
+ ib_size_alignment = 4;
break;
default:
return -EINVAL;
}
+
+ for (i = 0; i < adev->num_ip_blocks; i++)
+ if (adev->ip_blocks[i].version->type == type &&
+ adev->ip_blocks[i].status.valid)
+ break;
+
+ if (i == adev->num_ip_blocks)
+ return 0;
+
+ num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
+ num_rings);
+
+ result->hw_ip_version_major = adev->ip_blocks[i].version->major;
+ result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
+
+ if (adev->asic_type >= CHIP_VEGA10) {
+ switch (type) {
+ case AMD_IP_BLOCK_TYPE_GFX:
+ result->ip_discovery_version =
+ IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, GC_HWIP, 0));
+ break;
+ case AMD_IP_BLOCK_TYPE_SDMA:
+ result->ip_discovery_version =
+ IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, SDMA0_HWIP, 0));
+ break;
+ case AMD_IP_BLOCK_TYPE_UVD:
+ case AMD_IP_BLOCK_TYPE_VCN:
+ case AMD_IP_BLOCK_TYPE_JPEG:
+ result->ip_discovery_version =
+ IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, UVD_HWIP, 0));
+ break;
+ case AMD_IP_BLOCK_TYPE_VCE:
+ result->ip_discovery_version =
+ IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, VCE_HWIP, 0));
+ break;
+ case AMD_IP_BLOCK_TYPE_VPE:
+ result->ip_discovery_version =
+ IP_VERSION_MAJ_MIN_REV(amdgpu_ip_version(adev, VPE_HWIP, 0));
+ break;
+ default:
+ result->ip_discovery_version = 0;
+ break;
+ }
+ } else {
+ result->ip_discovery_version = 0;
+ }
+ result->capabilities_flags = 0;
+ result->available_rings = (1 << num_rings) - 1;
+ result->userq_num_slots = num_slots;
+ result->ib_start_alignment = ib_start_alignment;
+ result->ib_size_alignment = ib_size_alignment;
return 0;
}
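/*
 * Editor's note: unlike the old per-ring ready bitmap removed further down
 * in this patch, available_rings is now a contiguous mask,
 * (1 << num_rings) - 1.  A hypothetical consumer therefore only cares
 * about the popcount or whether any ring is exposed at all, e.g.:
 */
static inline bool example_ip_has_rings(const struct drm_amdgpu_info_hw_ip *ip)
{
        /* at least one ring of this IP type is exposed to userspace */
        return ip->available_rings != 0;
}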
@@ -223,7 +601,7 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
/**
* amdgpu_info_ioctl - answer a device specific request.
*
- * @adev: amdgpu device pointer
+ * @dev: drm device pointer
* @data: request object
* @filp: drm filp
*
@@ -232,17 +610,22 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
* etc. (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
-static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_amdgpu_info *info = data;
struct amdgpu_mode_info *minfo = &adev->mode_info;
void __user *out = (void __user *)(uintptr_t)info->return_pointer;
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_ip_block *ip_block;
+ enum amd_ip_block_type type;
+ struct amdgpu_xcp *xcp;
+ u32 count, inst_mask;
uint32_t size = info->return_size;
struct drm_crtc *crtc;
uint32_t ui32 = 0;
uint64_t ui64 = 0;
- int i, found;
+ int i, found, ret;
int ui32_size = sizeof(ui32);
if (!info->return_size || !info->return_pointer)
@@ -257,6 +640,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
crtc = (struct drm_crtc *)minfo->crtcs[i];
if (crtc && crtc->base.id == info->mode_crtc.id) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
ui32 = amdgpu_crtc->crtc_id;
found = 1;
break;
@@ -269,108 +653,82 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
case AMDGPU_INFO_HW_IP_INFO: {
struct drm_amdgpu_info_hw_ip ip = {};
- enum amd_ip_block_type type;
- uint32_t ring_mask = 0;
- uint32_t ib_start_alignment = 0;
- uint32_t ib_size_alignment = 0;
- if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
- return -EINVAL;
+ ret = amdgpu_hw_ip_info(adev, info, &ip);
+ if (ret)
+ return ret;
- switch (info->query_hw_ip.type) {
- case AMDGPU_HW_IP_GFX:
- type = AMD_IP_BLOCK_TYPE_GFX;
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 8;
- break;
- case AMDGPU_HW_IP_COMPUTE:
- type = AMD_IP_BLOCK_TYPE_GFX;
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 8;
- break;
- case AMDGPU_HW_IP_DMA:
- type = AMD_IP_BLOCK_TYPE_SDMA;
- for (i = 0; i < adev->sdma.num_instances; i++)
- ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 1;
- break;
- case AMDGPU_HW_IP_UVD:
- type = AMD_IP_BLOCK_TYPE_UVD;
- ring_mask = adev->uvd.ring.ready ? 1 : 0;
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 16;
- break;
- case AMDGPU_HW_IP_VCE:
- type = AMD_IP_BLOCK_TYPE_VCE;
- for (i = 0; i < adev->vce.num_rings; i++)
- ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 1;
- break;
- case AMDGPU_HW_IP_UVD_ENC:
- type = AMD_IP_BLOCK_TYPE_UVD;
- for (i = 0; i < adev->uvd.num_enc_rings; i++)
- ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 : 0) << i);
- ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
- ib_size_alignment = 1;
- break;
- default:
+ ret = copy_to_user(out, &ip, min_t(size_t, size, sizeof(ip)));
+ return ret ? -EFAULT : 0;
+ }
+ case AMDGPU_INFO_HW_IP_COUNT: {
+ fpriv = (struct amdgpu_fpriv *)filp->driver_priv;
+ type = amdgpu_ip_get_block_type(adev, info->query_hw_ip.type);
+ ip_block = amdgpu_device_ip_get_ip_block(adev, type);
+
+ if (!ip_block || !ip_block->status.valid)
return -EINVAL;
- }
- for (i = 0; i < adev->num_ip_blocks; i++) {
- if (adev->ip_blocks[i].version->type == type &&
- adev->ip_blocks[i].status.valid) {
- ip.hw_ip_version_major = adev->ip_blocks[i].version->major;
- ip.hw_ip_version_minor = adev->ip_blocks[i].version->minor;
- ip.capabilities_flags = 0;
- ip.available_rings = ring_mask;
- ip.ib_start_alignment = ib_start_alignment;
- ip.ib_size_alignment = ib_size_alignment;
+ if (adev->xcp_mgr && adev->xcp_mgr->num_xcps > 0 &&
+ fpriv->xcp_id < adev->xcp_mgr->num_xcps) {
+ xcp = &adev->xcp_mgr->xcp[fpriv->xcp_id];
+ switch (type) {
+ case AMD_IP_BLOCK_TYPE_GFX:
+ ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask);
+ if (ret)
+ return ret;
+ count = hweight32(inst_mask);
+ break;
+ case AMD_IP_BLOCK_TYPE_SDMA:
+ ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_SDMA, &inst_mask);
+ if (ret)
+ return ret;
+ count = hweight32(inst_mask);
+ break;
+ case AMD_IP_BLOCK_TYPE_JPEG:
+ ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ if (ret)
+ return ret;
+ count = hweight32(inst_mask) * adev->jpeg.num_jpeg_rings;
break;
+ case AMD_IP_BLOCK_TYPE_VCN:
+ ret = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
+ if (ret)
+ return ret;
+ count = hweight32(inst_mask);
+ break;
+ default:
+ return -EINVAL;
}
+
+ return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
}
- return copy_to_user(out, &ip,
- min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
- }
- case AMDGPU_INFO_HW_IP_COUNT: {
- enum amd_ip_block_type type;
- uint32_t count = 0;
- switch (info->query_hw_ip.type) {
- case AMDGPU_HW_IP_GFX:
- type = AMD_IP_BLOCK_TYPE_GFX;
- break;
- case AMDGPU_HW_IP_COMPUTE:
- type = AMD_IP_BLOCK_TYPE_GFX;
+ switch (type) {
+ case AMD_IP_BLOCK_TYPE_GFX:
+ case AMD_IP_BLOCK_TYPE_VCE:
+ count = 1;
break;
- case AMDGPU_HW_IP_DMA:
- type = AMD_IP_BLOCK_TYPE_SDMA;
+ case AMD_IP_BLOCK_TYPE_SDMA:
+ count = adev->sdma.num_instances;
break;
- case AMDGPU_HW_IP_UVD:
- type = AMD_IP_BLOCK_TYPE_UVD;
+ case AMD_IP_BLOCK_TYPE_JPEG:
+ count = adev->jpeg.num_jpeg_inst * adev->jpeg.num_jpeg_rings;
break;
- case AMDGPU_HW_IP_VCE:
- type = AMD_IP_BLOCK_TYPE_VCE;
+ case AMD_IP_BLOCK_TYPE_VCN:
+ count = adev->vcn.num_vcn_inst;
break;
- case AMDGPU_HW_IP_UVD_ENC:
- type = AMD_IP_BLOCK_TYPE_UVD;
+ case AMD_IP_BLOCK_TYPE_UVD:
+ count = adev->uvd.num_uvd_inst;
break;
+ /* For all other IP block types not listed in the switch statement
+ * the ip status is valid here and the instance count is one.
+ */
default:
- return -EINVAL;
+ count = 1;
+ break;
}
- for (i = 0; i < adev->num_ip_blocks; i++)
- if (adev->ip_blocks[i].version->type == type &&
- adev->ip_blocks[i].status.valid &&
- count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
- count++;
-
return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
}
case AMDGPU_INFO_TIMESTAMP:
@@ -378,7 +736,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_FW_VERSION: {
struct drm_amdgpu_info_firmware fw_info;
- int ret;
/* We only support one instance of each IP block right now. */
if (info->query_fw.ip_instance != 0)
@@ -397,65 +754,75 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_INFO_NUM_EVICTIONS:
ui64 = atomic64_read(&adev->num_evictions);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
+ case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
+ ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
+ return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
- ui64 = atomic64_read(&adev->vram_usage);
+ ui64 = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VIS_VRAM_USAGE:
- ui64 = atomic64_read(&adev->vram_vis_usage);
+ ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GTT_USAGE:
- ui64 = atomic64_read(&adev->gtt_usage);
+ ui64 = ttm_resource_manager_usage(&adev->mman.gtt_mgr.manager);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GDS_CONFIG: {
struct drm_amdgpu_info_gds gds_info;
memset(&gds_info, 0, sizeof(gds_info));
- gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
- gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
- gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
- gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
- gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
- gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
- gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
+ gds_info.compute_partition_size = adev->gds.gds_size;
+ gds_info.gds_total_size = adev->gds.gds_size;
+ gds_info.gws_per_compute_partition = adev->gds.gws_size;
+ gds_info.oa_per_compute_partition = adev->gds.oa_size;
return copy_to_user(out, &gds_info,
min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
}
case AMDGPU_INFO_VRAM_GTT: {
struct drm_amdgpu_info_vram_gtt vram_gtt;
- vram_gtt.vram_size = adev->mc.real_vram_size;
- vram_gtt.vram_size -= adev->vram_pin_size;
- vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
- vram_gtt.vram_cpu_accessible_size -= (adev->vram_pin_size - adev->invisible_pin_size);
- vram_gtt.gtt_size = adev->mc.gtt_size;
- vram_gtt.gtt_size -= adev->gart_pin_size;
+ vram_gtt.vram_size = adev->gmc.real_vram_size -
+ atomic64_read(&adev->vram_pin_size) -
+ AMDGPU_VM_RESERVED_VRAM;
+ vram_gtt.vram_cpu_accessible_size =
+ min(adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size),
+ vram_gtt.vram_size);
+ vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size;
+ vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
return copy_to_user(out, &vram_gtt,
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
}
case AMDGPU_INFO_MEMORY: {
struct drm_amdgpu_memory_info mem;
+ struct ttm_resource_manager *gtt_man =
+ &adev->mman.gtt_mgr.manager;
+ struct ttm_resource_manager *vram_man =
+ &adev->mman.vram_mgr.manager;
memset(&mem, 0, sizeof(mem));
- mem.vram.total_heap_size = adev->mc.real_vram_size;
- mem.vram.usable_heap_size =
- adev->mc.real_vram_size - adev->vram_pin_size;
- mem.vram.heap_usage = atomic64_read(&adev->vram_usage);
+ mem.vram.total_heap_size = adev->gmc.real_vram_size;
+ mem.vram.usable_heap_size = adev->gmc.real_vram_size -
+ atomic64_read(&adev->vram_pin_size) -
+ AMDGPU_VM_RESERVED_VRAM;
+ mem.vram.heap_usage =
+ ttm_resource_manager_usage(vram_man);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
- adev->mc.visible_vram_size;
+ adev->gmc.visible_vram_size;
mem.cpu_accessible_vram.usable_heap_size =
- adev->mc.visible_vram_size -
- (adev->vram_pin_size - adev->invisible_pin_size);
+ min(adev->gmc.visible_vram_size -
+ atomic64_read(&adev->visible_pin_size),
+ mem.vram.usable_heap_size);
mem.cpu_accessible_vram.heap_usage =
- atomic64_read(&adev->vram_vis_usage);
+ amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
mem.cpu_accessible_vram.max_allocation =
mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
- mem.gtt.total_heap_size = adev->mc.gtt_size;
- mem.gtt.usable_heap_size =
- adev->mc.gtt_size - adev->gart_pin_size;
- mem.gtt.heap_usage = atomic64_read(&adev->gtt_usage);
+ mem.gtt.total_heap_size = gtt_man->size;
+ mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
+ atomic64_read(&adev->gart_pin_size);
+ mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man);
mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
return copy_to_user(out, &mem,
@@ -463,110 +830,218 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
? -EFAULT : 0;
}
case AMDGPU_INFO_READ_MMR_REG: {
- unsigned n, alloc_size;
+ int ret = 0;
+ unsigned int n, alloc_size;
uint32_t *regs;
- unsigned se_num = (info->read_mmr_reg.instance >>
+ unsigned int se_num = (info->read_mmr_reg.instance >>
AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
AMDGPU_INFO_MMR_SE_INDEX_MASK;
- unsigned sh_num = (info->read_mmr_reg.instance >>
+ unsigned int sh_num = (info->read_mmr_reg.instance >>
AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
AMDGPU_INFO_MMR_SH_INDEX_MASK;
+ if (!down_read_trylock(&adev->reset_domain->sem))
+ return -ENOENT;
+
/* set full masks if the userspace set all bits
- * in the bitfields */
- if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
+ * in the bitfields
+ */
+ if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK) {
se_num = 0xffffffff;
- if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
+ } else if (se_num >= AMDGPU_GFX_MAX_SE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK) {
sh_num = 0xffffffff;
+ } else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (info->read_mmr_reg.count > 128) {
+ ret = -EINVAL;
+ goto out;
+ }
regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
- if (!regs)
- return -ENOMEM;
+ if (!regs) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
alloc_size = info->read_mmr_reg.count * sizeof(*regs);
- for (i = 0; i < info->read_mmr_reg.count; i++)
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < info->read_mmr_reg.count; i++) {
if (amdgpu_asic_read_register(adev, se_num, sh_num,
info->read_mmr_reg.dword_offset + i,
&regs[i])) {
DRM_DEBUG_KMS("unallowed offset %#x\n",
info->read_mmr_reg.dword_offset + i);
kfree(regs);
- return -EFAULT;
+ amdgpu_gfx_off_ctrl(adev, true);
+ ret = -EFAULT;
+ goto out;
}
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
n = copy_to_user(out, regs, min(size, alloc_size));
kfree(regs);
- return n ? -EFAULT : 0;
+ ret = (n ? -EFAULT : 0);
+out:
+ up_read(&adev->reset_domain->sem);
+ return ret;
}
case AMDGPU_INFO_DEV_INFO: {
- struct drm_amdgpu_info_device dev_info = {};
-
- dev_info.device_id = dev->pdev->device;
- dev_info.chip_rev = adev->rev_id;
- dev_info.external_rev = adev->external_rev_id;
- dev_info.pci_rev = dev->pdev->revision;
- dev_info.family = adev->family;
- dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
- dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
+ struct drm_amdgpu_info_device *dev_info;
+ uint64_t vm_size;
+ uint32_t pcie_gen_mask, pcie_width_mask;
+
+ dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
+ if (!dev_info)
+ return -ENOMEM;
+
+ dev_info->device_id = adev->pdev->device;
+ dev_info->chip_rev = adev->rev_id;
+ dev_info->external_rev = adev->external_rev_id;
+ dev_info->pci_rev = adev->pdev->revision;
+ dev_info->family = adev->family;
+ dev_info->num_shader_engines = adev->gfx.config.max_shader_engines;
+ dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
/* return all clocks in KHz */
- dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
+ dev_info->gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
if (adev->pm.dpm_enabled) {
- dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
- dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
+ dev_info->max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
+ dev_info->max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
+ dev_info->min_engine_clock = amdgpu_dpm_get_sclk(adev, true) * 10;
+ dev_info->min_memory_clock = amdgpu_dpm_get_mclk(adev, true) * 10;
} else {
- dev_info.max_engine_clock = adev->pm.default_sclk * 10;
- dev_info.max_memory_clock = adev->pm.default_mclk * 10;
+ dev_info->max_engine_clock =
+ dev_info->min_engine_clock =
+ adev->clock.default_sclk * 10;
+ dev_info->max_memory_clock =
+ dev_info->min_memory_clock =
+ adev->clock.default_mclk * 10;
}
- dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
- dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
+ dev_info->enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
+ dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se *
adev->gfx.config.max_shader_engines;
- dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
- dev_info._pad = 0;
- dev_info.ids_flags = 0;
+ dev_info->num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
+ dev_info->ids_flags = 0;
if (adev->flags & AMD_IS_APU)
- dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
- if (amdgpu_sriov_vf(adev))
- dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
- dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
- dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
- dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
- dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) *
- AMDGPU_GPU_PAGE_SIZE;
- dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
-
- dev_info.cu_active_number = adev->gfx.cu_info.number;
- dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
- dev_info.ce_ram_size = adev->gfx.ce_ram_size;
- memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
- sizeof(adev->gfx.cu_info.bitmap));
- dev_info.vram_type = adev->mc.vram_type;
- dev_info.vram_bit_width = adev->mc.vram_width;
- dev_info.vce_harvest_config = adev->vce.harvest_config;
- dev_info.gc_double_offchip_lds_buf =
+ dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
+ if (adev->gfx.mcbp)
+ dev_info->ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;
+ if (amdgpu_is_tmz(adev))
+ dev_info->ids_flags |= AMDGPU_IDS_FLAGS_TMZ;
+ if (adev->gfx.config.ta_cntl2_truncate_coord_mode)
+ dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD;
+
+ /* Gang submit is not supported under SRIOV currently */
+ if (!amdgpu_sriov_vf(adev))
+ dev_info->ids_flags |= AMDGPU_IDS_FLAGS_GANG_SUBMIT;
+
+ if (amdgpu_passthrough(adev))
+ dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_PT <<
+ AMDGPU_IDS_FLAGS_MODE_SHIFT) &
+ AMDGPU_IDS_FLAGS_MODE_MASK;
+ else if (amdgpu_sriov_vf(adev))
+ dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_VF <<
+ AMDGPU_IDS_FLAGS_MODE_SHIFT) &
+ AMDGPU_IDS_FLAGS_MODE_MASK;
+
+ vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
+ vm_size -= AMDGPU_VA_RESERVED_TOP;
+
+ /* Older VCE FW versions are buggy and can handle only 40bits */
+ if (adev->vce.fw_version &&
+ adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
+ vm_size = min(vm_size, 1ULL << 40);
+
+ dev_info->virtual_address_offset = AMDGPU_VA_RESERVED_BOTTOM;
+ dev_info->virtual_address_max =
+ min(vm_size, AMDGPU_GMC_HOLE_START);
+
+ if (vm_size > AMDGPU_GMC_HOLE_START) {
+ dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
+ dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
+ }
+ dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+ dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
+ dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+ dev_info->cu_active_number = adev->gfx.cu_info.number;
+ dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
+ dev_info->ce_ram_size = adev->gfx.ce_ram_size;
+ memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
+ sizeof(adev->gfx.cu_info.ao_cu_bitmap));
+ memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
+ sizeof(dev_info->cu_bitmap));
+ dev_info->vram_type = adev->gmc.vram_type;
+ dev_info->vram_bit_width = adev->gmc.vram_width;
+ dev_info->vce_harvest_config = adev->vce.harvest_config;
+ dev_info->gc_double_offchip_lds_buf =
adev->gfx.config.double_offchip_lds_buf;
-
- if (amdgpu_ngg) {
- dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
- dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
- dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
- dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
- dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
- dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
- dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
- dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
+ dev_info->wave_front_size = adev->gfx.cu_info.wave_front_size;
+ dev_info->num_shader_visible_vgprs = adev->gfx.config.max_gprs;
+ dev_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
+ dev_info->num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
+ dev_info->gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
+ dev_info->gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
+ dev_info->max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;
+
+ if (adev->family >= AMDGPU_FAMILY_NV)
+ dev_info->pa_sc_tile_steering_override =
+ adev->gfx.config.pa_sc_tile_steering_override;
+
+ dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
+
+ /* Combine the chip gen mask with the platform (CPU/mobo) mask. */
+ pcie_gen_mask = adev->pm.pcie_gen_mask &
+ (adev->pm.pcie_gen_mask >> CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT);
+ pcie_width_mask = adev->pm.pcie_mlw_mask &
+ (adev->pm.pcie_mlw_mask >> CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT);
+ dev_info->pcie_gen = fls(pcie_gen_mask);
+ dev_info->pcie_num_lanes =
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
+ pcie_width_mask & CAIL_ASIC_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
+
+ dev_info->tcp_cache_size = adev->gfx.config.gc_tcp_l1_size;
+ dev_info->num_sqc_per_wgp = adev->gfx.config.gc_num_sqc_per_wgp;
+ dev_info->sqc_data_cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
+ dev_info->sqc_inst_cache_size = adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
+ dev_info->gl1c_cache_size = adev->gfx.config.gc_gl1c_size_per_instance *
+ adev->gfx.config.gc_gl1c_per_sa;
+ dev_info->gl2c_cache_size = adev->gfx.config.gc_gl2c_per_gpu;
+ dev_info->mall_size = adev->gmc.mall_size;
+
+
+ if (adev->gfx.funcs->get_gfx_shadow_info) {
+ struct amdgpu_gfx_shadow_info shadow_info;
+
+ ret = amdgpu_gfx_get_gfx_shadow_info(adev, &shadow_info);
+ if (!ret) {
+ dev_info->shadow_size = shadow_info.shadow_size;
+ dev_info->shadow_alignment = shadow_info.shadow_alignment;
+ dev_info->csa_size = shadow_info.csa_size;
+ dev_info->csa_alignment = shadow_info.csa_alignment;
+ }
}
- dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
- dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
- dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
- dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
- dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
- dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
- dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;
-
- return copy_to_user(out, &dev_info,
- min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
+
+ dev_info->userq_ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+
+ ret = copy_to_user(out, dev_info,
+ min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
+ kfree(dev_info);
+ return ret;
}
case AMDGPU_INFO_VCE_CLOCK_TABLE: {
- unsigned i;
+ unsigned int i;
struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
struct amd_vce_state *vce_state;
@@ -603,6 +1078,26 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
min((size_t)size, (size_t)(bios_size - bios_offset)))
? -EFAULT : 0;
}
+ case AMDGPU_INFO_VBIOS_INFO: {
+ struct drm_amdgpu_info_vbios vbios_info = {};
+ struct atom_context *atom_context;
+
+ atom_context = adev->mode_info.atom_context;
+ if (atom_context) {
+ memcpy(vbios_info.name, atom_context->name,
+ sizeof(atom_context->name));
+ memcpy(vbios_info.vbios_pn, atom_context->vbios_pn,
+ sizeof(atom_context->vbios_pn));
+ vbios_info.version = atom_context->version;
+ memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
+ sizeof(atom_context->vbios_ver_str));
+ memcpy(vbios_info.date, atom_context->date,
+ sizeof(atom_context->date));
+ }
+
+ return copy_to_user(out, &vbios_info,
+ min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n",
info->vbios_info.type);
@@ -631,10 +1126,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
}
case AMDGPU_INFO_SENSOR: {
- struct pp_gpu_power query = {0};
- int query_size = sizeof(query);
-
- if (amdgpu_dpm == 0)
+ if (!adev->pm.dpm_enabled)
return -ENOENT;
switch (info->sensor_info.type) {
@@ -675,11 +1167,25 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
/* get average GPU power */
if (amdgpu_dpm_read_sensor(adev,
- AMDGPU_PP_SENSOR_GPU_POWER,
- (void *)&query, &query_size)) {
+ AMDGPU_PP_SENSOR_GPU_AVG_POWER,
+ (void *)&ui32, &ui32_size)) {
+ /* fall back to input power for backwards compat */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
+ }
+ ui32 >>= 8;
+ break;
+ case AMDGPU_INFO_SENSOR_GPU_INPUT_POWER:
+ /* get input GPU power */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_GPU_INPUT_POWER,
+ (void *)&ui32, &ui32_size)) {
return -EINVAL;
}
- ui32 = query.average_gpu_power >> 8;
+ ui32 >>= 8;
break;
case AMDGPU_INFO_SENSOR_VDDNB:
/* get VDDNB in millivolts */
@@ -697,6 +1203,42 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return -EINVAL;
}
break;
+ case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
+ /* get stable pstate sclk in Mhz */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
+ ui32 /= 100;
+ break;
+ case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
+ /* get stable pstate mclk in Mhz */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
+ ui32 /= 100;
+ break;
+ case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK:
+ /* get peak pstate sclk in Mhz */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
+ ui32 /= 100;
+ break;
+ case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK:
+ /* get peak pstate mclk in Mhz */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
+ ui32 /= 100;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n",
info->sensor_info.type);
@@ -704,6 +1246,126 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
}
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
}
+ case AMDGPU_INFO_VRAM_LOST_COUNTER:
+ ui32 = atomic_read(&adev->vram_lost_counter);
+ return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
+ case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ uint64_t ras_mask;
+
+ if (!ras)
+ return -EINVAL;
+ ras_mask = (uint64_t)adev->ras_enabled << 32 | ras->features;
+
+ return copy_to_user(out, &ras_mask,
+ min_t(u64, size, sizeof(ras_mask))) ?
+ -EFAULT : 0;
+ }
+ case AMDGPU_INFO_VIDEO_CAPS: {
+ const struct amdgpu_video_codecs *codecs;
+ struct drm_amdgpu_info_video_caps *caps;
+ int r;
+
+ if (!adev->asic_funcs->query_video_codecs)
+ return -EINVAL;
+
+ switch (info->video_cap.type) {
+ case AMDGPU_INFO_VIDEO_CAPS_DECODE:
+ r = amdgpu_asic_query_video_codecs(adev, false, &codecs);
+ if (r)
+ return -EINVAL;
+ break;
+ case AMDGPU_INFO_VIDEO_CAPS_ENCODE:
+ r = amdgpu_asic_query_video_codecs(adev, true, &codecs);
+ if (r)
+ return -EINVAL;
+ break;
+ default:
+ DRM_DEBUG_KMS("Invalid request %d\n",
+ info->video_cap.type);
+ return -EINVAL;
+ }
+
+ caps = kzalloc(sizeof(*caps), GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ for (i = 0; i < codecs->codec_count; i++) {
+ int idx = codecs->codec_array[i].codec_type;
+
+ switch (idx) {
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1:
+ caps->codec_info[idx].valid = 1;
+ caps->codec_info[idx].max_width =
+ codecs->codec_array[i].max_width;
+ caps->codec_info[idx].max_height =
+ codecs->codec_array[i].max_height;
+ caps->codec_info[idx].max_pixels_per_frame =
+ codecs->codec_array[i].max_pixels_per_frame;
+ caps->codec_info[idx].max_level =
+ codecs->codec_array[i].max_level;
+ break;
+ default:
+ break;
+ }
+ }
+ r = copy_to_user(out, caps,
+ min((size_t)size, sizeof(*caps))) ? -EFAULT : 0;
+ kfree(caps);
+ return r;
+ }
+ case AMDGPU_INFO_MAX_IBS: {
+ uint32_t max_ibs[AMDGPU_HW_IP_NUM];
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; ++i)
+ max_ibs[i] = amdgpu_ring_max_ibs(i);
+
+ return copy_to_user(out, max_ibs,
+ min((size_t)size, sizeof(max_ibs))) ? -EFAULT : 0;
+ }
+ case AMDGPU_INFO_GPUVM_FAULT: {
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct drm_amdgpu_info_gpuvm_fault gpuvm_fault;
+ unsigned long flags;
+
+ if (!vm)
+ return -EINVAL;
+
+ memset(&gpuvm_fault, 0, sizeof(gpuvm_fault));
+
+ xa_lock_irqsave(&adev->vm_manager.pasids, flags);
+ gpuvm_fault.addr = vm->fault_info.addr;
+ gpuvm_fault.status = vm->fault_info.status;
+ gpuvm_fault.vmhub = vm->fault_info.vmhub;
+ xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
+
+ return copy_to_user(out, &gpuvm_fault,
+ min((size_t)size, sizeof(gpuvm_fault))) ? -EFAULT : 0;
+ }
+ case AMDGPU_INFO_UQ_FW_AREAS: {
+ struct drm_amdgpu_info_uq_metadata meta_info = {};
+
+ switch (info->query_hw_ip.type) {
+ case AMDGPU_HW_IP_GFX:
+ ret = amdgpu_userq_metadata_info_gfx(adev, info, &meta_info.gfx);
+ if (ret)
+ return ret;
+
+ ret = copy_to_user(out, &meta_info,
+ min((size_t)size, sizeof(meta_info))) ? -EFAULT : 0;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -711,25 +1373,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return 0;
}
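/*
 * Editor's note on the copy-out convention used throughout the ioctl
 * above: every case clamps the copy to min(size, sizeof(object)), where
 * size is the userspace-supplied return_size, so older userspace that
 * knows a smaller struct still gets a valid (truncated) answer.  A
 * hypothetical userspace caller, assuming the usual <sys/ioctl.h> and
 * <drm/amdgpu_drm.h> includes, looks roughly like:
 *
 *      static int example_query_vram_usage(int fd, uint64_t *usage)
 *      {
 *              struct drm_amdgpu_info request = {
 *                      .return_pointer = (uintptr_t)usage,
 *                      .return_size = sizeof(*usage),
 *                      .query = AMDGPU_INFO_VRAM_USAGE,
 *              };
 *
 *              // the kernel copies back at most return_size bytes
 *              return ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
 *      }
 */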
-
-/*
- * Outdated mess for old drm with Xorg being in charge (void function now).
- */
-/**
- * amdgpu_driver_lastclose_kms - drm callback for last close
- *
- * @dev: drm dev pointer
- *
- * Switch vga_switcheroo state after last close (all asics).
- */
-void amdgpu_driver_lastclose_kms(struct drm_device *dev)
-{
- struct amdgpu_device *adev = dev->dev_private;
-
- amdgpu_fbdev_restore_mode(adev);
- vga_switcheroo_process_delayed_switch();
-}
-
/**
* amdgpu_driver_open_kms - drm callback for open
*
@@ -741,15 +1384,24 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev)
*/
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv;
- int r;
+ int r, pasid;
+
+ /* Ensure IB tests are run on ring */
+ flush_delayed_work(&adev->delayed_init_work);
+
+
+ if (amdgpu_ras_intr_triggered()) {
+ DRM_ERROR("RAS Intr triggered, device disabled!!");
+ return -EHWPOISON;
+ }
file_priv->driver_priv = NULL;
r = pm_runtime_get_sync(dev->dev);
if (r < 0)
- return r;
+ goto pm_put;
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
if (unlikely(!fpriv)) {
@@ -757,35 +1409,69 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
goto out_suspend;
}
- r = amdgpu_vm_init(adev, &fpriv->vm);
- if (r) {
- kfree(fpriv);
- goto out_suspend;
+ pasid = amdgpu_pasid_alloc(16);
+ if (pasid < 0) {
+ dev_warn(adev->dev, "No more PASIDs available!");
+ pasid = 0;
}
+ r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
+ if (r)
+ goto error_pasid;
+
+ amdgpu_debugfs_vm_init(file_priv);
+
+ r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id, pasid);
+ if (r)
+ goto error_pasid;
+
fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
if (!fpriv->prt_va) {
r = -ENOMEM;
- amdgpu_vm_fini(adev, &fpriv->vm);
- kfree(fpriv);
- goto out_suspend;
+ goto error_vm;
}
- if (amdgpu_sriov_vf(adev)) {
- r = amdgpu_map_static_csa(adev, &fpriv->vm);
+ if (adev->gfx.mcbp) {
+ uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
+
+ r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
+ &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
if (r)
- goto out_suspend;
+ goto error_vm;
}
+ r = amdgpu_seq64_map(adev, &fpriv->vm, &fpriv->seq64_va);
+ if (r)
+ goto error_vm;
+
mutex_init(&fpriv->bo_list_lock);
- idr_init(&fpriv->bo_list_handles);
+ idr_init_base(&fpriv->bo_list_handles, 1);
+
+ r = amdgpu_userq_mgr_init(&fpriv->userq_mgr, file_priv, adev);
+ if (r)
+ DRM_WARN("Can't setup usermode queues, use legacy workload submission only\n");
- amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
+ r = amdgpu_eviction_fence_init(&fpriv->evf_mgr);
+ if (r)
+ goto error_vm;
+
+ amdgpu_ctx_mgr_init(&fpriv->ctx_mgr, adev);
file_priv->driver_priv = fpriv;
+ goto out_suspend;
+
+error_vm:
+ amdgpu_vm_fini(adev, &fpriv->vm);
+
+error_pasid:
+ if (pasid)
+ amdgpu_pasid_free(pasid);
+
+ kfree(fpriv);
out_suspend:
pm_runtime_mark_last_busy(dev->dev);
+pm_put:
pm_runtime_put_autosuspend(dev->dev);
return r;
@@ -802,9 +1488,11 @@ out_suspend:
void amdgpu_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_bo_list *list;
+ struct amdgpu_bo *pd;
+ u32 pasid;
int handle;
if (!fpriv)
@@ -812,25 +1500,37 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
pm_runtime_get_sync(dev->dev);
- amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
+ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
+ amdgpu_uvd_free_handles(adev, file_priv);
+ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
+ amdgpu_vce_free_handles(adev, file_priv);
+
+ if (fpriv->csa_va) {
+ uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
- amdgpu_uvd_free_handles(adev, file_priv);
- amdgpu_vce_free_handles(adev, file_priv);
+ WARN_ON(amdgpu_unmap_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
+ fpriv->csa_va, csa_addr));
+ fpriv->csa_va = NULL;
+ }
- amdgpu_vm_bo_rmv(adev, fpriv->prt_va);
+ amdgpu_seq64_unmap(adev, fpriv);
- if (amdgpu_sriov_vf(adev)) {
- /* TODO: how to handle reserve failure */
- BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
- amdgpu_vm_bo_rmv(adev, fpriv->vm.csa_bo_va);
- fpriv->vm.csa_bo_va = NULL;
- amdgpu_bo_unreserve(adev->virt.csa_obj);
+ pasid = fpriv->vm.pasid;
+ pd = amdgpu_bo_ref(fpriv->vm.root.bo);
+ if (!WARN_ON(amdgpu_bo_reserve(pd, true))) {
+ amdgpu_vm_bo_del(adev, fpriv->prt_va);
+ amdgpu_bo_unreserve(pd);
}
+ amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
amdgpu_vm_fini(adev, &fpriv->vm);
+ if (pasid)
+ amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
+ amdgpu_bo_unref(&pd);
+
idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
- amdgpu_bo_list_free(list);
+ amdgpu_bo_list_put(list);
idr_destroy(&fpriv->bo_list_handles);
mutex_destroy(&fpriv->bo_list_lock);
@@ -842,21 +1542,31 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
pm_runtime_put_autosuspend(dev->dev);
}
+
+void amdgpu_driver_release_kms(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ amdgpu_device_fini_sw(adev);
+ pci_set_drvdata(adev->pdev, NULL);
+}
+
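/*
 * Editor's note: the split above is deliberate.  amdgpu_driver_unload_kms()
 * tears the hardware down via amdgpu_device_fini_hw(), while software state
 * is only freed here, from the DRM ->release callback, once the last
 * reference to the drm_device drops.  A hypothetical drm_driver wiring,
 * for illustration only:
 *
 *      static const struct drm_driver example_driver = {
 *              .open      = amdgpu_driver_open_kms,
 *              .postclose = amdgpu_driver_postclose_kms,
 *              .release   = amdgpu_driver_release_kms,
 *      };
 */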
/*
* VBlank related functions.
*/
/**
* amdgpu_get_vblank_counter_kms - get frame count
*
- * @dev: drm dev pointer
- * @pipe: crtc to get the frame count from
+ * @crtc: crtc to get the frame count from
*
* Gets the frame count on the requested crtc (all asics).
* Returns frame count on success, -EINVAL on failure.
*/
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
+u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
- struct amdgpu_device *adev = dev->dev_private;
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int vpos, hpos, stat;
u32 count;
@@ -879,11 +1589,11 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
*/
do {
count = amdgpu_display_vblank_get_counter(adev, pipe);
- /* Ask amdgpu_get_crtc_scanoutpos to return vpos as
- * distance to start of vblank, instead of regular
- * vertical scanout pos.
+ /* Ask amdgpu_display_get_crtc_scanoutpos to return
+ * vpos as distance to start of vblank, instead of
+ * regular vertical scanout pos.
*/
- stat = amdgpu_get_crtc_scanoutpos(
+ stat = amdgpu_display_get_crtc_scanoutpos(
dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
&vpos, &hpos, NULL, NULL,
&adev->mode_info.crtcs[pipe]->base.hwmode);
@@ -915,16 +1625,17 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
/**
* amdgpu_enable_vblank_kms - enable vblank interrupt
*
- * @dev: drm dev pointer
- * @pipe: crtc to enable vblank interrupt for
+ * @crtc: crtc to enable vblank interrupt for
*
* Enable the interrupt on the requested crtc (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
-int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
+int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
- struct amdgpu_device *adev = dev->dev_private;
- int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}
@@ -932,92 +1643,45 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
/**
* amdgpu_disable_vblank_kms - disable vblank interrupt
*
- * @dev: drm dev pointer
- * @pipe: crtc to disable vblank interrupt for
+ * @crtc: crtc to disable vblank interrupt for
*
* Disable the interrupt on the requested crtc (all asics).
*/
-void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
+void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
- struct amdgpu_device *adev = dev->dev_private;
- int idx = amdgpu_crtc_idx_to_irq_type(adev, pipe);
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}
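/*
 * Editor's note: with the new per-CRTC signatures, the three helpers above
 * plug directly into the drm_crtc_funcs vblank hooks instead of the old
 * per-device driver callbacks removed below.  A hypothetical wiring, for
 * illustration only:
 *
 *      static const struct drm_crtc_funcs example_crtc_funcs = {
 *              .get_vblank_counter = amdgpu_get_vblank_counter_kms,
 *              .enable_vblank      = amdgpu_enable_vblank_kms,
 *              .disable_vblank     = amdgpu_disable_vblank_kms,
 *      };
 */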
-/**
- * amdgpu_get_vblank_timestamp_kms - get vblank timestamp
- *
- * @dev: drm dev pointer
- * @crtc: crtc to get the timestamp for
- * @max_error: max error
- * @vblank_time: time value
- * @flags: flags passed to the driver
- *
- * Gets the timestamp on the requested crtc based on the
- * scanout position. (all asics).
- * Returns postive status flags on success, negative error on failure.
- */
-int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
- int *max_error,
- struct timeval *vblank_time,
- unsigned flags)
-{
- struct drm_crtc *crtc;
- struct amdgpu_device *adev = dev->dev_private;
-
- if (pipe >= dev->num_crtcs) {
- DRM_ERROR("Invalid crtc %u\n", pipe);
- return -EINVAL;
- }
-
- /* Get associated drm_crtc: */
- crtc = &adev->mode_info.crtcs[pipe]->base;
- if (!crtc) {
- /* This can occur on driver load if some component fails to
- * initialize completely and driver is unloaded */
- DRM_ERROR("Uninitialized crtc %d\n", pipe);
- return -EINVAL;
- }
-
- /* Helper routine in DRM core does all the work: */
- return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
- vblank_time, flags,
- &crtc->hwmode);
-}
-
-const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
- DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- /* KMS */
- DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
-};
-const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
-
/*
* Debugfs info
*/
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
+static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = m->private;
struct drm_amdgpu_info_firmware fw_info;
struct drm_amdgpu_query_fw query_fw;
+ struct atom_context *ctx = adev->mode_info.atom_context;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
int ret, i;
+ static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
+#define TA_FW_NAME(type)[TA_FW_TYPE_PSP_##type] = #type
+ TA_FW_NAME(XGMI),
+ TA_FW_NAME(RAS),
+ TA_FW_NAME(HDCP),
+ TA_FW_NAME(DTM),
+ TA_FW_NAME(RAP),
+ TA_FW_NAME(SECUREDISPLAY),
+#undef TA_FW_NAME
+ };
+
/* VCE */
query_fw.fw_type = AMDGPU_INFO_FW_VCE;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
@@ -1074,6 +1738,46 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
+ /* RLC SAVE RESTORE LIST CNTL */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* RLC SAVE RESTORE LIST GPM MEM */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* RLC SAVE RESTORE LIST SRM MEM */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* RLCP */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCP;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLCP feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* RLCV */
+ query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLCV;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "RLCV feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
/* MEC */
query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
query_fw.index = 0;
@@ -1084,8 +1788,7 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
fw_info.feature, fw_info.ver);
/* MEC2 */
- if (adev->asic_type == CHIP_KAVERI ||
- (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
+ if (adev->gfx.mec2_fw) {
query_fw.index = 1;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
@@ -1094,6 +1797,15 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
fw_info.feature, fw_info.ver);
}
+ /* IMU */
+ query_fw.fw_type = AMDGPU_INFO_FW_IMU;
+ query_fw.index = 0;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "IMU feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
/* PSP SOS */
query_fw.fw_type = AMDGPU_INFO_FW_SOS;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
@@ -1111,13 +1823,28 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
+ query_fw.fw_type = AMDGPU_INFO_FW_TA;
+ for (i = TA_FW_TYPE_PSP_XGMI; i < TA_FW_TYPE_MAX_INDEX; i++) {
+ query_fw.index = i;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ continue;
+
+ seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+ ta_fw_name[i], fw_info.feature, fw_info.ver);
+ }
+
/* SMC */
query_fw.fw_type = AMDGPU_INFO_FW_SMC;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
return ret;
- seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
- fw_info.feature, fw_info.ver);
+ smu_program = (fw_info.ver >> 24) & 0xff;
+ smu_major = (fw_info.ver >> 16) & 0xff;
+ smu_minor = (fw_info.ver >> 8) & 0xff;
+ smu_debug = (fw_info.ver >> 0) & 0xff;
+ seq_printf(m, "SMC feature version: %u, program: %d, firmware version: 0x%08x (%d.%d.%d)\n",
+ fw_info.feature, smu_program, fw_info.ver, smu_major, smu_minor, smu_debug);
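As a worked example of this decoding: a packed SMU version of, say, 0x04252E00 would be reported as program 4 and firmware version 37.46.0 (major 0x25, minor 0x2E, debug 0x00).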
/* SDMA */
query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
@@ -1130,20 +1857,89 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
i, fw_info.feature, fw_info.ver);
}
+ /* VCN */
+ query_fw.fw_type = AMDGPU_INFO_FW_VCN;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* DMCU */
+ query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* DMCUB */
+ query_fw.fw_type = AMDGPU_INFO_FW_DMCUB;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "DMCUB feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* TOC */
+ query_fw.fw_type = AMDGPU_INFO_FW_TOC;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "TOC feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* CAP */
+ if (adev->psp.cap_fw) {
+ query_fw.fw_type = AMDGPU_INFO_FW_CAP;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "CAP feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+ }
+
+ /* MES_KIQ */
+ query_fw.fw_type = AMDGPU_INFO_FW_MES_KIQ;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MES_KIQ feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* MES */
+ query_fw.fw_type = AMDGPU_INFO_FW_MES;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "MES feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ /* VPE */
+ query_fw.fw_type = AMDGPU_INFO_FW_VPE;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ return ret;
+ seq_printf(m, "VPE feature version: %u, firmware version: 0x%08x\n",
+ fw_info.feature, fw_info.ver);
+
+ seq_printf(m, "VBIOS version: %s\n", ctx->vbios_pn);
+
return 0;
}
-static const struct drm_info_list amdgpu_firmware_info_list[] = {
- {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
-};
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_firmware_info);
+
#endif
-int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
+void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
- ARRAY_SIZE(amdgpu_firmware_info_list));
-#else
- return 0;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("amdgpu_firmware_info", 0444, root,
+ adev, &amdgpu_debugfs_firmware_info_fops);
+
#endif
}
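Once registered this way the file appears under the primary DRM minor's debugfs directory, typically /sys/kernel/debug/dri/<N>/amdgpu_firmware_info (the minor number depends on probe order), and can be read with a plain cat.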
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_lsdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_lsdma.c
new file mode 100644
index 000000000000..4d1d4994ea3f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_lsdma.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_lsdma.h"
+
+#define AMDGPU_LSDMA_MAX_SIZE 0x2000000ULL
+
+int amdgpu_lsdma_wait_for(struct amdgpu_device *adev,
+ uint32_t reg_index, uint32_t reg_val,
+ uint32_t mask)
+{
+ uint32_t val;
+ int i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ val = RREG32(reg_index);
+ if ((val & mask) == reg_val)
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIME;
+}
+
+int amdgpu_lsdma_copy_mem(struct amdgpu_device *adev,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint64_t mem_size)
+{
+ int ret;
+
+ if (mem_size == 0)
+ return -EINVAL;
+
+ while (mem_size > 0) {
+ uint64_t current_copy_size = min(mem_size, AMDGPU_LSDMA_MAX_SIZE);
+
+ ret = adev->lsdma.funcs->copy_mem(adev, src_addr, dst_addr, current_copy_size);
+ if (ret)
+ return ret;
+ src_addr += current_copy_size;
+ dst_addr += current_copy_size;
+ mem_size -= current_copy_size;
+ }
+
+ return 0;
+}
+
+int amdgpu_lsdma_fill_mem(struct amdgpu_device *adev,
+ uint64_t dst_addr,
+ uint32_t data,
+ uint64_t mem_size)
+{
+ int ret;
+
+ if (mem_size == 0)
+ return -EINVAL;
+
+ while (mem_size > 0) {
+ uint64_t current_fill_size = min(mem_size, AMDGPU_LSDMA_MAX_SIZE);
+
+ ret = adev->lsdma.funcs->fill_mem(adev, dst_addr, data, current_fill_size);
+ if (ret)
+ return ret;
+ dst_addr += current_fill_size;
+ mem_size -= current_fill_size;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_lsdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_lsdma.h
new file mode 100644
index 000000000000..c61ba58c5ee0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_lsdma.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_LSDMA_H__
+#define __AMDGPU_LSDMA_H__
+
+struct amdgpu_lsdma {
+ const struct amdgpu_lsdma_funcs *funcs;
+};
+
+struct amdgpu_lsdma_funcs {
+ int (*copy_mem)(struct amdgpu_device *adev, uint64_t src_addr,
+ uint64_t dst_addr, uint64_t size);
+ int (*fill_mem)(struct amdgpu_device *adev, uint64_t dst_addr,
+ uint32_t data, uint64_t size);
+ void (*update_memory_power_gating)(struct amdgpu_device *adev, bool enable);
+};
+
+int amdgpu_lsdma_copy_mem(struct amdgpu_device *adev, uint64_t src_addr,
+ uint64_t dst_addr, uint64_t mem_size);
+int amdgpu_lsdma_fill_mem(struct amdgpu_device *adev, uint64_t dst_addr,
+ uint32_t data, uint64_t mem_size);
+int amdgpu_lsdma_wait_for(struct amdgpu_device *adev, uint32_t reg_index,
+ uint32_t reg_val, uint32_t mask);
+
+#endif
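A minimal sketch of how an IP-specific backend might plug into this interface; lsdma_v7_example and its callback are hypothetical names, only the amdgpu_lsdma_funcs layout is taken from the header above:

    static int lsdma_v7_example_copy_mem(struct amdgpu_device *adev, uint64_t src,
    					 uint64_t dst, uint64_t size)
    {
    	/* program the copy registers for one chunk, then poll for completion,
    	 * e.g. with amdgpu_lsdma_wait_for() */
    	return 0;
    }

    static const struct amdgpu_lsdma_funcs lsdma_v7_example_funcs = {
    	.copy_mem = lsdma_v7_example_copy_mem,
    	/* .fill_mem and .update_memory_power_gating are filled in the same way */
    };

During early init the backend would set adev->lsdma.funcs = &lsdma_v7_example_funcs; callers then go through amdgpu_lsdma_copy_mem()/amdgpu_lsdma_fill_mem(), which split requests into AMDGPU_LSDMA_MAX_SIZE chunks before invoking these callbacks.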
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
new file mode 100644
index 000000000000..3ca03b5e0f91
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -0,0 +1,630 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_ras.h"
+#include "amdgpu.h"
+#include "amdgpu_mca.h"
+
+#include "umc/umc_6_7_0_offset.h"
+#include "umc/umc_6_7_0_sh_mask.h"
+
+static bool amdgpu_mca_is_deferred_error(struct amdgpu_device *adev,
+ uint64_t mc_status)
+{
+ if (adev->umc.ras->check_ecc_err_status)
+ return adev->umc.ras->check_ecc_err_status(adev,
+ AMDGPU_MCA_ERROR_TYPE_DE, &mc_status);
+
+ return false;
+}
+
+void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr,
+ unsigned long *error_count)
+{
+ uint64_t mc_status = RREG64_PCIE(mc_status_addr);
+
+ if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+ REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+ *error_count += 1;
+}
+
+void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr,
+ unsigned long *error_count)
+{
+ uint64_t mc_status = RREG64_PCIE(mc_status_addr);
+
+ if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+ (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+ REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+ REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+ REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+ REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+ *error_count += 1;
+}
+
+void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr)
+{
+ WREG64_PCIE(mc_status_addr, 0x0ULL);
+}
+
+void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+ amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count));
+ amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count));
+
+ amdgpu_mca_reset_error_count(adev, mc_status_addr);
+}
+
+int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mca_ras_block *ras;
+
+ if (!adev->mca.mp0.ras)
+ return 0;
+
+ ras = adev->mca.mp0.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mca.mp0 ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mca.mp0");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mca.mp0.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
+
+int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mca_ras_block *ras;
+
+ if (!adev->mca.mp1.ras)
+ return 0;
+
+ ras = adev->mca.mp1.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mca.mp1 ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mca.mp1");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mca.mp1.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
+
+int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mca_ras_block *ras;
+
+ if (!adev->mca.mpio.ras)
+ return 0;
+
+ ras = adev->mca.mpio.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mca.mpio ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mca.mpio");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MCA;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mca.mpio.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
+
+static void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set)
+{
+ if (!mca_set)
+ return;
+
+ memset(mca_set, 0, sizeof(*mca_set));
+ INIT_LIST_HEAD(&mca_set->list);
+}
+
+static int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry)
+{
+ struct mca_bank_node *node;
+
+ if (!entry)
+ return -EINVAL;
+
+ node = kvzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ memcpy(&node->entry, entry, sizeof(*entry));
+
+ INIT_LIST_HEAD(&node->node);
+ list_add_tail(&node->node, &mca_set->list);
+
+ mca_set->nr_entries++;
+
+ return 0;
+}
+
+static int amdgpu_mca_bank_set_merge(struct mca_bank_set *mca_set, struct mca_bank_set *new)
+{
+ struct mca_bank_node *node;
+
+ list_for_each_entry(node, &new->list, node)
+ amdgpu_mca_bank_set_add_entry(mca_set, &node->entry);
+
+ return 0;
+}
+
+static void amdgpu_mca_bank_set_remove_node(struct mca_bank_set *mca_set, struct mca_bank_node *node)
+{
+ if (!node)
+ return;
+
+ list_del(&node->node);
+ kvfree(node);
+
+ mca_set->nr_entries--;
+}
+
+static void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set)
+{
+ struct mca_bank_node *node, *tmp;
+
+ if (list_empty(&mca_set->list))
+ return;
+
+ list_for_each_entry_safe(node, tmp, &mca_set->list, node)
+ amdgpu_mca_bank_set_remove_node(mca_set, node);
+}
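Taken together these helpers form a small list-backed container for MCA bank entries. A sketch of the intended lifecycle (error handling elided):

    struct mca_bank_set set;
    struct mca_bank_entry entry = { .idx = 0 };

    amdgpu_mca_bank_set_init(&set);              /* empty list, nr_entries = 0 */
    amdgpu_mca_bank_set_add_entry(&set, &entry); /* copies the entry into a new node */
    /* ...dispatch or merge the collected entries... */
    amdgpu_mca_bank_set_release(&set);           /* frees every node */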
+
+void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs)
+{
+ struct amdgpu_mca *mca = &adev->mca;
+
+ mca->mca_funcs = mca_funcs;
+}
+
+int amdgpu_mca_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_mca *mca = &adev->mca;
+ struct mca_bank_cache *mca_cache;
+ int i;
+
+ atomic_set(&mca->ue_update_flag, 0);
+
+ for (i = 0; i < ARRAY_SIZE(mca->mca_caches); i++) {
+ mca_cache = &mca->mca_caches[i];
+ mutex_init(&mca_cache->lock);
+ amdgpu_mca_bank_set_init(&mca_cache->mca_set);
+ }
+
+ return 0;
+}
+
+void amdgpu_mca_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_mca *mca = &adev->mca;
+ struct mca_bank_cache *mca_cache;
+ int i;
+
+ atomic_set(&mca->ue_update_flag, 0);
+
+ for (i = 0; i < ARRAY_SIZE(mca->mca_caches); i++) {
+ mca_cache = &mca->mca_caches[i];
+ amdgpu_mca_bank_set_release(&mca_cache->mca_set);
+ mutex_destroy(&mca_cache->lock);
+ }
+}
+
+int amdgpu_mca_reset(struct amdgpu_device *adev)
+{
+ amdgpu_mca_fini(adev);
+
+ return amdgpu_mca_init(adev);
+}
+
+int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
+{
+ const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+
+ if (mca_funcs && mca_funcs->mca_set_debug_mode)
+ return mca_funcs->mca_set_debug_mode(adev, enable);
+
+ return -EOPNOTSUPP;
+}
+
+static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry,
+ struct ras_query_context *qctx)
+{
+ u64 event_id = qctx ? qctx->evid.event_id : RAS_EVENT_INVALID_ID;
+
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].STATUS=0x%016llx\n",
+ idx, entry->regs[MCA_REG_IDX_STATUS]);
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].ADDR=0x%016llx\n",
+ idx, entry->regs[MCA_REG_IDX_ADDR]);
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].MISC0=0x%016llx\n",
+ idx, entry->regs[MCA_REG_IDX_MISC0]);
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].IPID=0x%016llx\n",
+ idx, entry->regs[MCA_REG_IDX_IPID]);
+ RAS_EVENT_LOG(adev, event_id, HW_ERR "aca entry[%02d].SYND=0x%016llx\n",
+ idx, entry->regs[MCA_REG_IDX_SYND]);
+}
+
+static int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count)
+{
+ const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+
+ if (!count)
+ return -EINVAL;
+
+ if (mca_funcs && mca_funcs->mca_get_valid_mca_count)
+ return mca_funcs->mca_get_valid_mca_count(adev, type, count);
+
+ return -EOPNOTSUPP;
+}
+
+static int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
+ int idx, struct mca_bank_entry *entry)
+{
+ const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+ int count;
+
+ if (!mca_funcs || !mca_funcs->mca_get_mca_entry)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case AMDGPU_MCA_ERROR_TYPE_UE:
+ count = mca_funcs->max_ue_count;
+ break;
+ case AMDGPU_MCA_ERROR_TYPE_CE:
+ count = mca_funcs->max_ce_count;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (idx >= count)
+ return -EINVAL;
+
+ return mca_funcs->mca_get_mca_entry(adev, type, idx, entry);
+}
+
+static bool amdgpu_mca_bank_should_update(struct amdgpu_device *adev, enum amdgpu_mca_error_type type)
+{
+ struct amdgpu_mca *mca = &adev->mca;
+ bool ret = true;
+
+ /*
+ * The UE valid MCA count is only cleared by a reset, so to avoid
+ * counting the same errors repeatedly, the MCA banks are refreshed
+ * only once during the GPU recovery stage.
+ */
+ if (type == AMDGPU_MCA_ERROR_TYPE_UE) {
+ if (amdgpu_ras_intr_triggered())
+ ret = atomic_cmpxchg(&mca->ue_update_flag, 0, 1) == 0;
+ else
+ atomic_set(&mca->ue_update_flag, 0);
+ }
+
+ return ret;
+}
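Since atomic_cmpxchg() returns the previously stored value, the expression is true only for the first caller that flips ue_update_flag from 0 to 1 while a RAS interrupt is in flight; later callers read back 1 and skip re-reading the UE banks until the flag is cleared again outside of recovery.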
+
+static bool amdgpu_mca_bank_should_dump(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
+ struct mca_bank_entry *entry)
+{
+ bool ret;
+
+ switch (type) {
+ case AMDGPU_MCA_ERROR_TYPE_CE:
+ ret = amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]);
+ break;
+ case AMDGPU_MCA_ERROR_TYPE_UE:
+ default:
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
+
+static int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set,
+ struct ras_query_context *qctx)
+{
+ struct mca_bank_entry entry;
+ uint32_t count = 0, i;
+ int ret;
+
+ if (!mca_set)
+ return -EINVAL;
+
+ if (!amdgpu_mca_bank_should_update(adev, type))
+ return 0;
+
+ ret = amdgpu_mca_smu_get_valid_mca_count(adev, type, &count);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ memset(&entry, 0, sizeof(entry));
+ ret = amdgpu_mca_smu_get_mca_entry(adev, type, i, &entry);
+ if (ret)
+ return ret;
+
+ amdgpu_mca_bank_set_add_entry(mca_set, &entry);
+
+ if (amdgpu_mca_bank_should_dump(adev, type, &entry))
+ amdgpu_mca_smu_mca_bank_dump(adev, i, &entry, qctx);
+ }
+
+ return 0;
+}
+
+static int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+ enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
+{
+ const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+
+ if (!count || !entry)
+ return -EINVAL;
+
+ if (!mca_funcs || !mca_funcs->mca_parse_mca_error_count)
+ return -EOPNOTSUPP;
+
+ return mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, count);
+}
+
+static int amdgpu_mca_dispatch_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
+ struct mca_bank_set *mca_set, struct ras_err_data *err_data)
+{
+ struct amdgpu_smuio_mcm_config_info mcm_info;
+ struct mca_bank_node *node, *tmp;
+ struct mca_bank_entry *entry;
+ uint32_t count;
+ int ret;
+
+ if (!mca_set)
+ return -EINVAL;
+
+ if (!mca_set->nr_entries)
+ return 0;
+
+ list_for_each_entry_safe(node, tmp, &mca_set->list, node) {
+ entry = &node->entry;
+
+ count = 0;
+ ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ if (!count)
+ continue;
+
+ memset(&mcm_info, 0, sizeof(mcm_info));
+
+ mcm_info.socket_id = entry->info.socket_id;
+ mcm_info.die_id = entry->info.aid;
+
+ if (type == AMDGPU_MCA_ERROR_TYPE_UE) {
+ amdgpu_ras_error_statistic_ue_count(err_data,
+ &mcm_info, (uint64_t)count);
+ } else {
+ if (amdgpu_mca_is_deferred_error(adev, entry->regs[MCA_REG_IDX_STATUS]))
+ amdgpu_ras_error_statistic_de_count(err_data,
+ &mcm_info, (uint64_t)count);
+ else
+ amdgpu_ras_error_statistic_ce_count(err_data,
+ &mcm_info, (uint64_t)count);
+ }
+
+ amdgpu_mca_bank_set_remove_node(mca_set, node);
+ }
+
+ return 0;
+}
+
+static int amdgpu_mca_add_mca_set_to_cache(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, struct mca_bank_set *new)
+{
+ struct mca_bank_cache *mca_cache = &adev->mca.mca_caches[type];
+ int ret;
+
+ mutex_lock(&mca_cache->lock);
+ ret = amdgpu_mca_bank_set_merge(&mca_cache->mca_set, new);
+ mutex_unlock(&mca_cache->lock);
+
+ return ret;
+}
+
+int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
+ struct ras_err_data *err_data, struct ras_query_context *qctx)
+{
+ struct mca_bank_set mca_set;
+ struct mca_bank_cache *mca_cache = &adev->mca.mca_caches[type];
+ int ret;
+
+ amdgpu_mca_bank_set_init(&mca_set);
+
+ ret = amdgpu_mca_smu_get_mca_set(adev, type, &mca_set, qctx);
+ if (ret)
+ goto out_mca_release;
+
+ ret = amdgpu_mca_dispatch_mca_set(adev, blk, type, &mca_set, err_data);
+ if (ret)
+ goto out_mca_release;
+
+ /* add the remaining mca banks to the mca cache */
+ if (mca_set.nr_entries) {
+ ret = amdgpu_mca_add_mca_set_to_cache(adev, type, &mca_set);
+ if (ret)
+ goto out_mca_release;
+ }
+
+ /* dispatch mca set again if mca cache has valid data */
+ mutex_lock(&mca_cache->lock);
+ if (mca_cache->mca_set.nr_entries)
+ ret = amdgpu_mca_dispatch_mca_set(adev, blk, type, &mca_cache->mca_set, err_data);
+ mutex_unlock(&mca_cache->lock);
+
+out_mca_release:
+ amdgpu_mca_bank_set_release(&mca_set);
+
+ return ret;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ int ret;
+
+ ret = amdgpu_ras_set_mca_debug_mode(adev, val ? true : false);
+ if (ret)
+ return ret;
+
+ dev_info(adev->dev, "amdgpu set smu mca debug mode %s success\n", val ? "on" : "off");
+
+ return 0;
+}
+
+static void mca_dump_entry(struct seq_file *m, struct mca_bank_entry *entry)
+{
+ int i, idx = entry->idx;
+ int reg_idx_array[] = {
+ MCA_REG_IDX_STATUS,
+ MCA_REG_IDX_ADDR,
+ MCA_REG_IDX_MISC0,
+ MCA_REG_IDX_IPID,
+ MCA_REG_IDX_SYND,
+ };
+
+ seq_printf(m, "mca entry[%d].type: %s\n", idx, entry->type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE");
+ seq_printf(m, "mca entry[%d].ip: %d\n", idx, entry->ip);
+ seq_printf(m, "mca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n",
+ idx, entry->info.socket_id, entry->info.aid, entry->info.hwid, entry->info.mcatype);
+
+ for (i = 0; i < ARRAY_SIZE(reg_idx_array); i++)
+ seq_printf(m, "mca entry[%d].regs[%d]: 0x%016llx\n", idx, reg_idx_array[i], entry->regs[reg_idx_array[i]]);
+}
+
+static int mca_dump_show(struct seq_file *m, enum amdgpu_mca_error_type type)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct mca_bank_node *node;
+ struct mca_bank_set mca_set;
+ struct ras_query_context qctx;
+ int ret;
+
+ amdgpu_mca_bank_set_init(&mca_set);
+
+ qctx.evid.event_id = RAS_EVENT_INVALID_ID;
+ ret = amdgpu_mca_smu_get_mca_set(adev, type, &mca_set, &qctx);
+ if (ret)
+ goto err_free_mca_set;
+
+ seq_printf(m, "amdgpu smu %s valid mca count: %d\n",
+ type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE", mca_set.nr_entries);
+
+ if (!mca_set.nr_entries)
+ goto err_free_mca_set;
+
+ list_for_each_entry(node, &mca_set.list, node)
+ mca_dump_entry(m, &node->entry);
+
+ /* add mca bank to mca bank cache */
+ ret = amdgpu_mca_add_mca_set_to_cache(adev, type, &mca_set);
+
+err_free_mca_set:
+ amdgpu_mca_bank_set_release(&mca_set);
+
+ return ret;
+}
+
+static int mca_dump_ce_show(struct seq_file *m, void *unused)
+{
+ return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_CE);
+}
+
+static int mca_dump_ce_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mca_dump_ce_show, inode->i_private);
+}
+
+static const struct file_operations mca_ce_dump_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = mca_dump_ce_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int mca_dump_ue_show(struct seq_file *m, void *unused)
+{
+ return mca_dump_show(m, AMDGPU_MCA_ERROR_TYPE_UE);
+}
+
+static int mca_dump_ue_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mca_dump_ue_show, inode->i_private);
+}
+
+static const struct file_operations mca_ue_dump_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = mca_dump_ue_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+DEFINE_DEBUGFS_ATTRIBUTE(mca_debug_mode_fops, NULL, amdgpu_mca_smu_debug_mode_set, "%llu\n");
+#endif
+
+void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root)
+{
+#if defined(CONFIG_DEBUG_FS)
+ if (!root)
+ return;
+
+ debugfs_create_file("mca_debug_mode", 0200, root, adev, &mca_debug_mode_fops);
+ debugfs_create_file("mca_ue_dump", 0400, root, adev, &mca_ue_dump_debug_fops);
+ debugfs_create_file("mca_ce_dump", 0400, root, adev, &mca_ce_dump_debug_fops);
+#endif
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
new file mode 100644
index 000000000000..e80323ff90c1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_MCA_H__
+#define __AMDGPU_MCA_H__
+
+#include "amdgpu_ras.h"
+
+#define MCA_MAX_REGS_COUNT (16)
+
+#define MCA_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> l)
+#define MCA_REG__STATUS__VAL(x) MCA_REG_FIELD(x, 63, 63)
+#define MCA_REG__STATUS__OVERFLOW(x) MCA_REG_FIELD(x, 62, 62)
+#define MCA_REG__STATUS__UC(x) MCA_REG_FIELD(x, 61, 61)
+#define MCA_REG__STATUS__EN(x) MCA_REG_FIELD(x, 60, 60)
+#define MCA_REG__STATUS__MISCV(x) MCA_REG_FIELD(x, 59, 59)
+#define MCA_REG__STATUS__ADDRV(x) MCA_REG_FIELD(x, 58, 58)
+#define MCA_REG__STATUS__PCC(x) MCA_REG_FIELD(x, 57, 57)
+#define MCA_REG__STATUS__ERRCOREIDVAL(x) MCA_REG_FIELD(x, 56, 56)
+#define MCA_REG__STATUS__TCC(x) MCA_REG_FIELD(x, 55, 55)
+#define MCA_REG__STATUS__SYNDV(x) MCA_REG_FIELD(x, 53, 53)
+#define MCA_REG__STATUS__CECC(x) MCA_REG_FIELD(x, 46, 46)
+#define MCA_REG__STATUS__UECC(x) MCA_REG_FIELD(x, 45, 45)
+#define MCA_REG__STATUS__DEFERRED(x) MCA_REG_FIELD(x, 44, 44)
+#define MCA_REG__STATUS__POISON(x) MCA_REG_FIELD(x, 43, 43)
+#define MCA_REG__STATUS__SCRUB(x) MCA_REG_FIELD(x, 40, 40)
+#define MCA_REG__STATUS__ERRCOREID(x) MCA_REG_FIELD(x, 37, 32)
+#define MCA_REG__STATUS__ADDRLSB(x) MCA_REG_FIELD(x, 29, 24)
+#define MCA_REG__STATUS__ERRORCODEEXT(x) MCA_REG_FIELD(x, 21, 16)
+#define MCA_REG__STATUS__ERRORCODE(x) MCA_REG_FIELD(x, 15, 0)
+
+#define MCA_REG__MISC0__ERRCNT(x) MCA_REG_FIELD(x, 43, 32)
+
+#define MCA_REG__SYND__ERRORINFORMATION(x) MCA_REG_FIELD(x, 17, 0)
+
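A worked example of the field-extraction macros above; the register value is made up for illustration:

    uint64_t misc0 = 0x0000012300000000ULL;
    /* MCA_REG__MISC0__ERRCNT() masks bits 43:32 and shifts right by 32,
     * so this yields 0x123 (291 recorded errors). */
    uint64_t errcnt = MCA_REG__MISC0__ERRCNT(misc0);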
+enum amdgpu_mca_ip {
+ AMDGPU_MCA_IP_UNKNOW = -1,
+ AMDGPU_MCA_IP_PSP = 0,
+ AMDGPU_MCA_IP_SDMA,
+ AMDGPU_MCA_IP_GC,
+ AMDGPU_MCA_IP_SMU,
+ AMDGPU_MCA_IP_MP5,
+ AMDGPU_MCA_IP_UMC,
+ AMDGPU_MCA_IP_PCS_XGMI,
+ AMDGPU_MCA_IP_COUNT,
+};
+
+enum amdgpu_mca_error_type {
+ AMDGPU_MCA_ERROR_TYPE_UE = 0,
+ AMDGPU_MCA_ERROR_TYPE_CE,
+ AMDGPU_MCA_ERROR_TYPE_DE,
+};
+
+struct amdgpu_mca_ras_block {
+ struct amdgpu_ras_block_object ras_block;
+};
+
+struct amdgpu_mca_ras {
+ struct ras_common_if *ras_if;
+ struct amdgpu_mca_ras_block *ras;
+};
+
+struct mca_bank_set {
+ int nr_entries;
+ struct list_head list;
+};
+
+struct mca_bank_cache {
+ struct mca_bank_set mca_set;
+ struct mutex lock;
+};
+
+struct amdgpu_mca {
+ struct amdgpu_mca_ras mp0;
+ struct amdgpu_mca_ras mp1;
+ struct amdgpu_mca_ras mpio;
+ const struct amdgpu_mca_smu_funcs *mca_funcs;
+ struct mca_bank_cache mca_caches[AMDGPU_MCA_ERROR_TYPE_DE];
+ atomic_t ue_update_flag;
+};
+
+enum mca_reg_idx {
+ MCA_REG_IDX_STATUS = 1,
+ MCA_REG_IDX_ADDR = 2,
+ MCA_REG_IDX_MISC0 = 3,
+ MCA_REG_IDX_IPID = 5,
+ MCA_REG_IDX_SYND = 6,
+ MCA_REG_IDX_COUNT = 16,
+};
+
+struct mca_bank_info {
+ int socket_id;
+ int aid;
+ int hwid;
+ int mcatype;
+};
+
+struct mca_bank_entry {
+ int idx;
+ enum amdgpu_mca_error_type type;
+ enum amdgpu_mca_ip ip;
+ struct mca_bank_info info;
+ uint64_t regs[MCA_MAX_REGS_COUNT];
+};
+
+struct mca_bank_node {
+ struct mca_bank_entry entry;
+ struct list_head node;
+};
+
+struct amdgpu_mca_smu_funcs {
+ int max_ue_count;
+ int max_ce_count;
+ int (*mca_set_debug_mode)(struct amdgpu_device *adev, bool enable);
+ int (*mca_parse_mca_error_count)(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
+ struct mca_bank_entry *entry, uint32_t *count);
+ int (*mca_get_valid_mca_count)(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
+ uint32_t *count);
+ int (*mca_get_mca_entry)(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
+ int idx, struct mca_bank_entry *entry);
+};
+
+void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr,
+ unsigned long *error_count);
+
+void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr,
+ unsigned long *error_count);
+
+void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr);
+
+void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
+ uint64_t mc_status_addr,
+ void *ras_error_status);
+int amdgpu_mca_mp0_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_mca_mp1_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev);
+
+void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs);
+int amdgpu_mca_init(struct amdgpu_device *adev);
+void amdgpu_mca_fini(struct amdgpu_device *adev);
+int amdgpu_mca_reset(struct amdgpu_device *adev);
+int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable);
+int amdgpu_mca_smu_get_mca_set_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+ enum amdgpu_mca_error_type type, uint32_t *total);
+void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root);
+int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
+ struct ras_err_data *err_data, struct ras_query_context *qctx);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
new file mode 100644
index 000000000000..5bf9be073cdd
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -0,0 +1,770 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <drm/drm_exec.h>
+
+#include "amdgpu_mes.h"
+#include "amdgpu.h"
+#include "soc15_common.h"
+#include "amdgpu_mes_ctx.h"
+
+#define AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+#define AMDGPU_ONE_DOORBELL_SIZE 8
+
+int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev)
+{
+ return roundup(AMDGPU_ONE_DOORBELL_SIZE *
+ AMDGPU_MES_MAX_NUM_OF_QUEUES_PER_PROCESS,
+ PAGE_SIZE);
+}
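With an 8-byte doorbell and 1024 queues per process this works out to 8192 bytes per slice, which roundup() leaves unchanged (two pages) on a 4 KiB PAGE_SIZE and pads up to a single page on 16 KiB or 64 KiB page configurations.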
+
+static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev)
+{
+ int i;
+ struct amdgpu_mes *mes = &adev->mes;
+
+ /* Bitmap for dynamic allocation of kernel doorbells */
+ mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL);
+ if (!mes->doorbell_bitmap) {
+ dev_err(adev->dev, "Failed to allocate MES doorbell bitmap\n");
+ return -ENOMEM;
+ }
+
+ mes->num_mes_dbs = PAGE_SIZE / AMDGPU_ONE_DOORBELL_SIZE;
+ for (i = 0; i < AMDGPU_MES_PRIORITY_NUM_LEVELS; i++) {
+ adev->mes.aggregated_doorbells[i] = mes->db_start_dw_offset + i * 2;
+ set_bit(i, mes->doorbell_bitmap);
+ }
+
+ return 0;
+}
+
+static int amdgpu_mes_event_log_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (!amdgpu_mes_log_enable)
+ return 0;
+
+ r = amdgpu_bo_create_kernel(adev, adev->mes.event_log_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->mes.event_log_gpu_obj,
+ &adev->mes.event_log_gpu_addr,
+ &adev->mes.event_log_cpu_addr);
+ if (r) {
+ dev_warn(adev->dev, "failed to create MES event log buffer (%d)", r);
+ return r;
+ }
+
+ memset(adev->mes.event_log_cpu_addr, 0, adev->mes.event_log_size);
+
+ return 0;
+}
+
+static void amdgpu_mes_doorbell_free(struct amdgpu_device *adev)
+{
+ bitmap_free(adev->mes.doorbell_bitmap);
+}
+
+int amdgpu_mes_init(struct amdgpu_device *adev)
+{
+ int i, r, num_pipes;
+
+ adev->mes.adev = adev;
+
+ idr_init(&adev->mes.pasid_idr);
+ idr_init(&adev->mes.gang_id_idr);
+ idr_init(&adev->mes.queue_id_idr);
+ ida_init(&adev->mes.doorbell_ida);
+ spin_lock_init(&adev->mes.queue_id_lock);
+ mutex_init(&adev->mes.mutex_hidden);
+
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++)
+ spin_lock_init(&adev->mes.ring_lock[i]);
+
+ adev->mes.total_max_queue = AMDGPU_FENCE_MES_QUEUE_ID_MASK;
+ adev->mes.vmid_mask_mmhub = 0xffffff00;
+ adev->mes.vmid_mask_gfxhub = adev->gfx.disable_kq ? 0xfffffffe : 0xffffff00;
+
+ num_pipes = adev->gfx.me.num_pipe_per_me * adev->gfx.me.num_me;
+ if (num_pipes > AMDGPU_MES_MAX_GFX_PIPES)
+ dev_warn(adev->dev, "more gfx pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_GFX_PIPES);
+
+ for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) {
+ if (i >= num_pipes)
+ break;
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
+ IP_VERSION(12, 0, 0))
+ /*
+ * GFX v12 has only one GFX pipe, but 8 queues in it.
+ * GFX pipe 0 queue 0 is used by the kernel queue,
+ * so set GFX pipe 0 queues 1-7 for MES scheduling:
+ * mask = 1111 1110b
+ */
+ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0xFF : 0xFE;
+ else
+ /*
+ * GFX pipe 0 queue 0 is used by the kernel queue,
+ * so set GFX pipe 0 queue 1 for MES scheduling:
+ * mask = 10b
+ */
+ adev->mes.gfx_hqd_mask[i] = adev->gfx.disable_kq ? 0x3 : 0x2;
+ }
+
+ num_pipes = adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_mec;
+ if (num_pipes > AMDGPU_MES_MAX_COMPUTE_PIPES)
+ dev_warn(adev->dev, "more compute pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_COMPUTE_PIPES);
+
+ for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) {
+ if (i >= num_pipes)
+ break;
+ adev->mes.compute_hqd_mask[i] = adev->gfx.disable_kq ? 0xF : 0xC;
+ }
+
+ num_pipes = adev->sdma.num_instances;
+ if (num_pipes > AMDGPU_MES_MAX_SDMA_PIPES)
+ dev_warn(adev->dev, "more SDMA pipes than supported by MES! (%d vs %d)\n",
+ num_pipes, AMDGPU_MES_MAX_SDMA_PIPES);
+
+ for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) {
+ if (i >= num_pipes)
+ break;
+ adev->mes.sdma_hqd_mask[i] = 0xfc;
+ }
+
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
+ r = amdgpu_device_wb_get(adev, &adev->mes.sch_ctx_offs[i]);
+ if (r) {
+ dev_err(adev->dev,
+ "(%d) ring trail_fence_offs wb alloc failed\n",
+ r);
+ goto error;
+ }
+ adev->mes.sch_ctx_gpu_addr[i] =
+ adev->wb.gpu_addr + (adev->mes.sch_ctx_offs[i] * 4);
+ adev->mes.sch_ctx_ptr[i] =
+ (uint64_t *)&adev->wb.wb[adev->mes.sch_ctx_offs[i]];
+
+ r = amdgpu_device_wb_get(adev,
+ &adev->mes.query_status_fence_offs[i]);
+ if (r) {
+ dev_err(adev->dev,
+ "(%d) query_status_fence_offs wb alloc failed\n",
+ r);
+ goto error;
+ }
+ adev->mes.query_status_fence_gpu_addr[i] = adev->wb.gpu_addr +
+ (adev->mes.query_status_fence_offs[i] * 4);
+ adev->mes.query_status_fence_ptr[i] =
+ (uint64_t *)&adev->wb.wb[adev->mes.query_status_fence_offs[i]];
+ }
+
+ r = amdgpu_mes_doorbell_init(adev);
+ if (r)
+ goto error;
+
+ r = amdgpu_mes_event_log_init(adev);
+ if (r)
+ goto error_doorbell;
+
+ if (adev->mes.hung_queue_db_array_size) {
+ r = amdgpu_bo_create_kernel(adev,
+ adev->mes.hung_queue_db_array_size * sizeof(u32),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mes.hung_queue_db_array_gpu_obj,
+ &adev->mes.hung_queue_db_array_gpu_addr,
+ &adev->mes.hung_queue_db_array_cpu_addr);
+ if (r) {
+ dev_warn(adev->dev, "failed to create MES hung db array buffer (%d)", r);
+ goto error_doorbell;
+ }
+ }
+
+ return 0;
+
+error_doorbell:
+ amdgpu_mes_doorbell_free(adev);
+error:
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
+ if (adev->mes.sch_ctx_ptr[i])
+ amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
+ if (adev->mes.query_status_fence_ptr[i])
+ amdgpu_device_wb_free(adev,
+ adev->mes.query_status_fence_offs[i]);
+ }
+
+ idr_destroy(&adev->mes.pasid_idr);
+ idr_destroy(&adev->mes.gang_id_idr);
+ idr_destroy(&adev->mes.queue_id_idr);
+ ida_destroy(&adev->mes.doorbell_ida);
+ mutex_destroy(&adev->mes.mutex_hidden);
+ return r;
+}
+
+void amdgpu_mes_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ amdgpu_bo_free_kernel(&adev->mes.hung_queue_db_array_gpu_obj,
+ &adev->mes.hung_queue_db_array_gpu_addr,
+ &adev->mes.hung_queue_db_array_cpu_addr);
+
+ amdgpu_bo_free_kernel(&adev->mes.event_log_gpu_obj,
+ &adev->mes.event_log_gpu_addr,
+ &adev->mes.event_log_cpu_addr);
+
+ for (i = 0; i < AMDGPU_MAX_MES_PIPES; i++) {
+ if (adev->mes.sch_ctx_ptr[i])
+ amdgpu_device_wb_free(adev, adev->mes.sch_ctx_offs[i]);
+ if (adev->mes.query_status_fence_ptr[i])
+ amdgpu_device_wb_free(adev,
+ adev->mes.query_status_fence_offs[i]);
+ }
+
+ amdgpu_mes_doorbell_free(adev);
+
+ idr_destroy(&adev->mes.pasid_idr);
+ idr_destroy(&adev->mes.gang_id_idr);
+ idr_destroy(&adev->mes.queue_id_idr);
+ ida_destroy(&adev->mes.doorbell_ida);
+ mutex_destroy(&adev->mes.mutex_hidden);
+}
+
+int amdgpu_mes_suspend(struct amdgpu_device *adev)
+{
+ struct mes_suspend_gang_input input;
+ int r;
+
+ if (!amdgpu_mes_suspend_resume_all_supported(adev))
+ return 0;
+
+ memset(&input, 0x0, sizeof(struct mes_suspend_gang_input));
+ input.suspend_all_gangs = 1;
+
+ /*
+ * Avoid taking any other locks under MES lock to avoid circular
+ * lock dependencies.
+ */
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->suspend_gang(&adev->mes, &input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ dev_err(adev->dev, "failed to suspend all gangs");
+
+ return r;
+}
+
+int amdgpu_mes_resume(struct amdgpu_device *adev)
+{
+ struct mes_resume_gang_input input;
+ int r;
+
+ if (!amdgpu_mes_suspend_resume_all_supported(adev))
+ return 0;
+
+ memset(&input, 0x0, sizeof(struct mes_resume_gang_input));
+ input.resume_all_gangs = 1;
+
+ /*
+ * Avoid taking any other locks under MES lock to avoid circular
+ * lock dependencies.
+ */
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->resume_gang(&adev->mes, &input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ dev_err(adev->dev, "failed to resume all gangs");
+
+ return r;
+}
+
+int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ struct mes_map_legacy_queue_input queue_input;
+ int r;
+
+ memset(&queue_input, 0, sizeof(queue_input));
+
+ queue_input.queue_type = ring->funcs->type;
+ queue_input.doorbell_offset = ring->doorbell_index;
+ queue_input.pipe_id = ring->pipe;
+ queue_input.queue_id = ring->queue;
+ queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
+ queue_input.wptr_addr = ring->wptr_gpu_addr;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ dev_err(adev->dev, "failed to map legacy queue\n");
+
+ return r;
+}
+
+int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ enum amdgpu_unmap_queues_action action,
+ u64 gpu_addr, u64 seq)
+{
+ struct mes_unmap_legacy_queue_input queue_input;
+ int r;
+
+ queue_input.action = action;
+ queue_input.queue_type = ring->funcs->type;
+ queue_input.doorbell_offset = ring->doorbell_index;
+ queue_input.pipe_id = ring->pipe;
+ queue_input.queue_id = ring->queue;
+ queue_input.trail_fence_addr = gpu_addr;
+ queue_input.trail_fence_data = seq;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ dev_err(adev->dev, "failed to unmap legacy queue\n");
+
+ return r;
+}
+
+int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ unsigned int vmid,
+ bool use_mmio)
+{
+ struct mes_reset_queue_input queue_input;
+ int r;
+
+ memset(&queue_input, 0, sizeof(queue_input));
+
+ queue_input.queue_type = ring->funcs->type;
+ queue_input.doorbell_offset = ring->doorbell_index;
+ queue_input.me_id = ring->me;
+ queue_input.pipe_id = ring->pipe;
+ queue_input.queue_id = ring->queue;
+ queue_input.mqd_addr = ring->mqd_obj ? amdgpu_bo_gpu_offset(ring->mqd_obj) : 0;
+ queue_input.wptr_addr = ring->wptr_gpu_addr;
+ queue_input.vmid = vmid;
+ queue_input.use_mmio = use_mmio;
+ queue_input.is_kq = true;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_GFX)
+ queue_input.legacy_gfx = true;
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ dev_err(adev->dev, "failed to reset legacy queue\n");
+
+ return r;
+}
+
+int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev)
+{
+ return adev->mes.hung_queue_db_array_size;
+}
+
+int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
+ int queue_type,
+ bool detect_only,
+ unsigned int *hung_db_num,
+ u32 *hung_db_array)
+{
+ struct mes_detect_and_reset_queue_input input;
+ u32 *db_array = adev->mes.hung_queue_db_array_cpu_addr;
+ int r, i;
+
+ if (!hung_db_num || !hung_db_array)
+ return -EINVAL;
+
+ if ((queue_type != AMDGPU_RING_TYPE_GFX) &&
+ (queue_type != AMDGPU_RING_TYPE_COMPUTE) &&
+ (queue_type != AMDGPU_RING_TYPE_SDMA))
+ return -EINVAL;
+
+ /* Clear the doorbell array before detection */
+ memset(adev->mes.hung_queue_db_array_cpu_addr, 0,
+ adev->mes.hung_queue_db_array_size * sizeof(u32));
+ input.queue_type = queue_type;
+ input.detect_only = detect_only;
+
+ r = adev->mes.funcs->detect_and_reset_hung_queues(&adev->mes,
+ &input);
+ if (r) {
+ dev_err(adev->dev, "failed to detect and reset\n");
+ } else {
+ *hung_db_num = 0;
+ for (i = 0; i < adev->mes.hung_queue_db_array_size; i++) {
+ if (db_array[i] != AMDGPU_MES_INVALID_DB_OFFSET) {
+ hung_db_array[i] = db_array[i];
+ *hung_db_num += 1;
+ }
+ }
+ }
+
+ return r;
+}
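A minimal caller sketch, assuming the output array is sized from amdgpu_mes_get_hung_queue_db_array_size() as the interface above suggests:

    unsigned int hung_db_num = 0;
    int size = amdgpu_mes_get_hung_queue_db_array_size(adev);
    u32 *hung_db = kcalloc(size, sizeof(u32), GFP_KERNEL);

    if (hung_db &&
        !amdgpu_mes_detect_and_reset_hung_queues(adev, AMDGPU_RING_TYPE_COMPUTE,
    					     true /* detect only */,
    					     &hung_db_num, hung_db))
    	dev_dbg(adev->dev, "detected %u hung compute queues\n", hung_db_num);
    kfree(hung_db);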
+
+uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg)
+{
+ struct mes_misc_op_input op_input;
+ int r, val = 0;
+ uint32_t addr_offset = 0;
+ uint64_t read_val_gpu_addr;
+ uint32_t *read_val_ptr;
+
+ if (amdgpu_device_wb_get(adev, &addr_offset)) {
+ dev_err(adev->dev, "critical bug! too many mes readers\n");
+ goto error;
+ }
+ read_val_gpu_addr = adev->wb.gpu_addr + (addr_offset * 4);
+ read_val_ptr = (uint32_t *)&adev->wb.wb[addr_offset];
+ op_input.op = MES_MISC_OP_READ_REG;
+ op_input.read_reg.reg_offset = reg;
+ op_input.read_reg.buffer_addr = read_val_gpu_addr;
+
+ if (!adev->mes.funcs->misc_op) {
+ dev_err(adev->dev, "mes rreg is not supported!\n");
+ goto error;
+ }
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ dev_err(adev->dev, "failed to read reg (0x%x)\n", reg);
+ else
+ val = *(read_val_ptr);
+
+error:
+ if (addr_offset)
+ amdgpu_device_wb_free(adev, addr_offset);
+ return val;
+}
+
+int amdgpu_mes_wreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t val)
+{
+ struct mes_misc_op_input op_input;
+ int r;
+
+ op_input.op = MES_MISC_OP_WRITE_REG;
+ op_input.write_reg.reg_offset = reg;
+ op_input.write_reg.reg_value = val;
+
+ if (!adev->mes.funcs->misc_op) {
+ dev_err(adev->dev, "mes wreg is not supported!\n");
+ r = -EINVAL;
+ goto error;
+ }
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ dev_err(adev->dev, "failed to write reg (0x%x)\n", reg);
+
+error:
+ return r;
+}
+
+int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask)
+{
+ struct mes_misc_op_input op_input;
+ int r;
+
+ op_input.op = MES_MISC_OP_WRM_REG_WR_WAIT;
+ op_input.wrm_reg.reg0 = reg0;
+ op_input.wrm_reg.reg1 = reg1;
+ op_input.wrm_reg.ref = ref;
+ op_input.wrm_reg.mask = mask;
+
+ if (!adev->mes.funcs->misc_op) {
+ dev_err(adev->dev, "mes reg_write_reg_wait is not supported!\n");
+ r = -EINVAL;
+ goto error;
+ }
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ dev_err(adev->dev, "failed to reg_write_reg_wait\n");
+
+error:
+ return r;
+}
+
+int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
+ uint64_t process_context_addr,
+ uint32_t spi_gdbg_per_vmid_cntl,
+ const uint32_t *tcp_watch_cntl,
+ uint32_t flags,
+ bool trap_en)
+{
+ struct mes_misc_op_input op_input = {0};
+ int r;
+
+ if (!adev->mes.funcs->misc_op) {
+ dev_err(adev->dev,
+ "mes set shader debugger is not supported!\n");
+ return -EINVAL;
+ }
+
+ op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
+ op_input.set_shader_debugger.process_context_addr = process_context_addr;
+ op_input.set_shader_debugger.flags.u32all = flags;
+
+ /* use amdgpu_mes_flush_shader_debugger() instead */
+ if (op_input.set_shader_debugger.flags.process_ctx_flush)
+ return -EINVAL;
+
+ op_input.set_shader_debugger.spi_gdbg_per_vmid_cntl = spi_gdbg_per_vmid_cntl;
+ memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl,
+ sizeof(op_input.set_shader_debugger.tcp_watch_cntl));
+
+ if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >>
+ AMDGPU_MES_API_VERSION_SHIFT) >= 14)
+ op_input.set_shader_debugger.trap_en = trap_en;
+
+ amdgpu_mes_lock(&adev->mes);
+
+ r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ if (r)
+ dev_err(adev->dev, "failed to set_shader_debugger\n");
+
+ amdgpu_mes_unlock(&adev->mes);
+
+ return r;
+}
+
+int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
+ uint64_t process_context_addr)
+{
+ struct mes_misc_op_input op_input = {0};
+ int r;
+
+ if (!adev->mes.funcs->misc_op) {
+ dev_err(adev->dev,
+ "mes flush shader debugger is not supported!\n");
+ return -EINVAL;
+ }
+
+ op_input.op = MES_MISC_OP_SET_SHADER_DEBUGGER;
+ op_input.set_shader_debugger.process_context_addr = process_context_addr;
+ op_input.set_shader_debugger.flags.process_ctx_flush = true;
+
+ amdgpu_mes_lock(&adev->mes);
+
+ r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ if (r)
+ dev_err(adev->dev, "failed to set_shader_debugger\n");
+
+ amdgpu_mes_unlock(&adev->mes);
+
+ return r;
+}
+
+uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
+ enum amdgpu_mes_priority_level prio)
+{
+ return adev->mes.aggregated_doorbells[prio];
+}
+
+int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
+{
+ const struct mes_firmware_header_v1_0 *mes_hdr;
+ struct amdgpu_firmware_info *info;
+ char ucode_prefix[30];
+ char fw_name[50];
+ bool need_retry = false;
+ u32 *ucode_ptr;
+ int r;
+
+ amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
+ sizeof(ucode_prefix));
+ if (adev->enable_uni_mes) {
+ snprintf(fw_name, sizeof(fw_name),
+ "amdgpu/%s_uni_mes.bin", ucode_prefix);
+ } else if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
+ ucode_prefix,
+ pipe == AMDGPU_MES_SCHED_PIPE ? "_2" : "1");
+ need_retry = true;
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
+ ucode_prefix,
+ pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
+ }
+
+ r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], AMDGPU_UCODE_REQUIRED,
+ "%s", fw_name);
+ if (r && need_retry && pipe == AMDGPU_MES_SCHED_PIPE) {
+ dev_info(adev->dev, "try to fall back to %s_mes.bin\n", ucode_prefix);
+ r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe],
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_mes.bin", ucode_prefix);
+ }
+
+ if (r)
+ goto out;
+
+ mes_hdr = (const struct mes_firmware_header_v1_0 *)
+ adev->mes.fw[pipe]->data;
+ adev->mes.uc_start_addr[pipe] =
+ le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
+ ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
+ adev->mes.data_start_addr[pipe] =
+ le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
+ ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
+ ucode_ptr = (u32 *)(adev->mes.fw[pipe]->data +
+ sizeof(union amdgpu_firmware_header));
+ adev->mes.fw_version[pipe] =
+ le32_to_cpu(ucode_ptr[24]) & AMDGPU_MES_VERSION_MASK;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ int ucode, ucode_data;
+
+ if (pipe == AMDGPU_MES_SCHED_PIPE) {
+ ucode = AMDGPU_UCODE_ID_CP_MES;
+ ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
+ } else {
+ ucode = AMDGPU_UCODE_ID_CP_MES1;
+ ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
+ }
+
+ info = &adev->firmware.ucode[ucode];
+ info->ucode_id = ucode;
+ info->fw = adev->mes.fw[pipe];
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
+ PAGE_SIZE);
+
+ info = &adev->firmware.ucode[ucode_data];
+ info->ucode_id = ucode_data;
+ info->fw = adev->mes.fw[pipe];
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
+ PAGE_SIZE);
+ }
+
+ return 0;
+out:
+ amdgpu_ucode_release(&adev->mes.fw[pipe]);
+ return r;
+}
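As an example of the resulting names: on a GC 11.0.x part whose ucode prefix decodes to something like gc_11_0_0, the scheduler pipe requests amdgpu/gc_11_0_0_mes_2.bin and the second pipe amdgpu/gc_11_0_0_mes1.bin, with amdgpu/gc_11_0_0_mes.bin as a fallback for the scheduler pipe; parts with unified MES request amdgpu/<prefix>_uni_mes.bin instead.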
+
+bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
+{
+ uint32_t mes_rev = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK;
+ bool is_supported = false;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(12, 0, 0) &&
+ mes_rev >= 0x63)
+ is_supported = true;
+
+ return is_supported;
+}
+
+/* FIXME: node_id will be used to identify the correct MES instance in the future */
+static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
+ uint32_t node_id, bool enable)
+{
+ struct mes_misc_op_input op_input = {0};
+ int r;
+
+ op_input.op = MES_MISC_OP_CHANGE_CONFIG;
+ op_input.change_config.option.limit_single_process = enable ? 1 : 0;
+
+ if (!adev->mes.funcs->misc_op) {
+ dev_err(adev->dev, "mes change config is not supported!\n");
+ r = -EINVAL;
+ goto error;
+ }
+
+ amdgpu_mes_lock(&adev->mes);
+ r = adev->mes.funcs->misc_op(&adev->mes, &op_input);
+ amdgpu_mes_unlock(&adev->mes);
+ if (r)
+ dev_err(adev->dev, "failed to change_config.\n");
+
+error:
+ return r;
+}
+
+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
+ mutex_lock(&adev->enforce_isolation_mutex);
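+ /* mirror the per-partition enforce-isolation state into the MES limit_single_process config */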
+ for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+ if (adev->enforce_isolation[i] == AMDGPU_ENFORCE_ISOLATION_ENABLE)
+ r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
+ else
+ r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ }
+ return r;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = m->private;
+ uint32_t *mem = (uint32_t *)(adev->mes.event_log_cpu_addr);
+
+ seq_hex_dump(m, "", DUMP_PREFIX_OFFSET, 32, 4,
+ mem, adev->mes.event_log_size, false);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_mes_event_log);
+
+#endif
+
+void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ if (adev->enable_mes && amdgpu_mes_log_enable)
+ debugfs_create_file("amdgpu_mes_event_log", 0444, root,
+ adev, &amdgpu_debugfs_mes_event_log_fops);
+
+#endif
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
new file mode 100644
index 000000000000..6b506fc72f58
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -0,0 +1,506 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_MES_H__
+#define __AMDGPU_MES_H__
+
+#include "amdgpu_irq.h"
+#include "kgd_kfd_interface.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_doorbell.h"
+#include <linux/sched/mm.h>
+
+#define AMDGPU_MES_MAX_COMPUTE_PIPES 8
+#define AMDGPU_MES_MAX_GFX_PIPES 2
+#define AMDGPU_MES_MAX_SDMA_PIPES 2
+
+#define AMDGPU_MES_API_VERSION_SHIFT 12
+#define AMDGPU_MES_FEAT_VERSION_SHIFT 24
+
+#define AMDGPU_MES_VERSION_MASK 0x00000fff
+#define AMDGPU_MES_API_VERSION_MASK 0x00fff000
+#define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000
+#define AMDGPU_MES_MSCRATCH_SIZE 0x40000
+#define AMDGPU_MES_INVALID_DB_OFFSET 0xffffffff
+
+enum amdgpu_mes_priority_level {
+ AMDGPU_MES_PRIORITY_LEVEL_LOW = 0,
+ AMDGPU_MES_PRIORITY_LEVEL_NORMAL = 1,
+ AMDGPU_MES_PRIORITY_LEVEL_MEDIUM = 2,
+ AMDGPU_MES_PRIORITY_LEVEL_HIGH = 3,
+ AMDGPU_MES_PRIORITY_LEVEL_REALTIME = 4,
+ AMDGPU_MES_PRIORITY_NUM_LEVELS
+};
+
+#define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
+#define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
+
+struct amdgpu_mes_funcs;
+
+enum amdgpu_mes_pipe {
+ AMDGPU_MES_SCHED_PIPE = 0,
+ AMDGPU_MES_KIQ_PIPE,
+ AMDGPU_MAX_MES_PIPES = 2,
+};
+
+struct amdgpu_mes {
+ struct amdgpu_device *adev;
+
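+ /* take only via amdgpu_mes_lock()/amdgpu_mes_unlock(), which also suppress reclaim */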
+ struct mutex mutex_hidden;
+
+ struct idr pasid_idr;
+ struct idr gang_id_idr;
+ struct idr queue_id_idr;
+ struct ida doorbell_ida;
+
+ spinlock_t queue_id_lock;
+
+ uint32_t sched_version;
+ uint32_t kiq_version;
+ uint32_t fw_version[AMDGPU_MAX_MES_PIPES];
+ bool enable_legacy_queue_map;
+
+ uint32_t total_max_queue;
+ uint32_t max_doorbell_slices;
+
+ uint64_t default_process_quantum;
+ uint64_t default_gang_quantum;
+
+ struct amdgpu_ring ring[AMDGPU_MAX_MES_PIPES];
+ spinlock_t ring_lock[AMDGPU_MAX_MES_PIPES];
+
+ const struct firmware *fw[AMDGPU_MAX_MES_PIPES];
+
+ /* mes ucode */
+ struct amdgpu_bo *ucode_fw_obj[AMDGPU_MAX_MES_PIPES];
+ uint64_t ucode_fw_gpu_addr[AMDGPU_MAX_MES_PIPES];
+ uint32_t *ucode_fw_ptr[AMDGPU_MAX_MES_PIPES];
+ uint64_t uc_start_addr[AMDGPU_MAX_MES_PIPES];
+
+ /* mes ucode data */
+ struct amdgpu_bo *data_fw_obj[AMDGPU_MAX_MES_PIPES];
+ uint64_t data_fw_gpu_addr[AMDGPU_MAX_MES_PIPES];
+ uint32_t *data_fw_ptr[AMDGPU_MAX_MES_PIPES];
+ uint64_t data_start_addr[AMDGPU_MAX_MES_PIPES];
+
+ /* eop gpu obj */
+ struct amdgpu_bo *eop_gpu_obj[AMDGPU_MAX_MES_PIPES];
+ uint64_t eop_gpu_addr[AMDGPU_MAX_MES_PIPES];
+
+ void *mqd_backup[AMDGPU_MAX_MES_PIPES];
+ struct amdgpu_irq_src irq[AMDGPU_MAX_MES_PIPES];
+
+ uint32_t vmid_mask_gfxhub;
+ uint32_t vmid_mask_mmhub;
+ uint32_t gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES];
+ uint32_t compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
+ uint32_t sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
+ uint32_t aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
+ uint32_t sch_ctx_offs[AMDGPU_MAX_MES_PIPES];
+ uint64_t sch_ctx_gpu_addr[AMDGPU_MAX_MES_PIPES];
+ uint64_t *sch_ctx_ptr[AMDGPU_MAX_MES_PIPES];
+ uint32_t query_status_fence_offs[AMDGPU_MAX_MES_PIPES];
+ uint64_t query_status_fence_gpu_addr[AMDGPU_MAX_MES_PIPES];
+ uint64_t *query_status_fence_ptr[AMDGPU_MAX_MES_PIPES];
+
+ uint32_t saved_flags;
+
+ /* initialize kiq pipe */
+ int (*kiq_hw_init)(struct amdgpu_device *adev);
+ int (*kiq_hw_fini)(struct amdgpu_device *adev);
+
+ /* MES doorbells */
+ uint32_t db_start_dw_offset;
+ uint32_t num_mes_dbs;
+ unsigned long *doorbell_bitmap;
+
+ /* MES event log buffer */
+ uint32_t event_log_size;
+ struct amdgpu_bo *event_log_gpu_obj;
+ uint64_t event_log_gpu_addr;
+ void *event_log_cpu_addr;
+
+ /* ip specific functions */
+ const struct amdgpu_mes_funcs *funcs;
+
+ /* mes resource_1 bo */
+ struct amdgpu_bo *resource_1[AMDGPU_MAX_MES_PIPES];
+ uint64_t resource_1_gpu_addr[AMDGPU_MAX_MES_PIPES];
+ void *resource_1_addr[AMDGPU_MAX_MES_PIPES];
+
+ int hung_queue_db_array_size;
+ struct amdgpu_bo *hung_queue_db_array_gpu_obj;
+ uint64_t hung_queue_db_array_gpu_addr;
+ void *hung_queue_db_array_cpu_addr;
+};
+
+struct amdgpu_mes_gang {
+ int gang_id;
+ int priority;
+ int inprocess_gang_priority;
+ int global_priority_level;
+ struct list_head list;
+ struct amdgpu_mes_process *process;
+ struct amdgpu_bo *gang_ctx_bo;
+ uint64_t gang_ctx_gpu_addr;
+ void *gang_ctx_cpu_ptr;
+ uint64_t gang_quantum;
+ struct list_head queue_list;
+};
+
+struct amdgpu_mes_queue {
+ struct list_head list;
+ struct amdgpu_mes_gang *gang;
+ int queue_id;
+ uint64_t doorbell_off;
+ struct amdgpu_bo *mqd_obj;
+ void *mqd_cpu_ptr;
+ uint64_t mqd_gpu_addr;
+ uint64_t wptr_gpu_addr;
+ int queue_type;
+ int paging;
+ struct amdgpu_ring *ring;
+};
+
+struct amdgpu_mes_queue_properties {
+ int queue_type;
+ uint64_t hqd_base_gpu_addr;
+ uint64_t rptr_gpu_addr;
+ uint64_t wptr_gpu_addr;
+ uint64_t wptr_mc_addr;
+ uint32_t queue_size;
+ uint64_t eop_gpu_addr;
+ uint32_t hqd_pipe_priority;
+ uint32_t hqd_queue_priority;
+ bool paging;
+ struct amdgpu_ring *ring;
+ /* out */
+ uint64_t doorbell_off;
+};
+
+struct amdgpu_mes_gang_properties {
+ uint32_t priority;
+ uint32_t gang_quantum;
+ uint32_t inprocess_gang_priority;
+ uint32_t priority_level;
+ int global_priority_level;
+};
+
+struct mes_add_queue_input {
+ uint32_t process_id;
+ uint64_t page_table_base_addr;
+ uint64_t process_va_start;
+ uint64_t process_va_end;
+ uint64_t process_quantum;
+ uint64_t process_context_addr;
+ uint64_t gang_quantum;
+ uint64_t gang_context_addr;
+ uint32_t inprocess_gang_priority;
+ uint32_t gang_global_priority_level;
+ uint32_t doorbell_offset;
+ uint64_t mqd_addr;
+ uint64_t wptr_addr;
+ uint64_t wptr_mc_addr;
+ uint32_t queue_type;
+ uint32_t paging;
+ uint32_t gws_base;
+ uint32_t gws_size;
+ uint64_t tba_addr;
+ uint64_t tma_addr;
+ uint32_t trap_en;
+ uint32_t skip_process_ctx_clear;
+ uint32_t is_kfd_process;
+ uint32_t is_aql_queue;
+ uint32_t queue_size;
+ uint32_t exclusively_scheduled;
+};
+
+struct mes_remove_queue_input {
+ uint32_t doorbell_offset;
+ uint64_t gang_context_addr;
+};
+
+struct mes_map_legacy_queue_input {
+ uint32_t queue_type;
+ uint32_t doorbell_offset;
+ uint32_t pipe_id;
+ uint32_t queue_id;
+ uint64_t mqd_addr;
+ uint64_t wptr_addr;
+};
+
+struct mes_unmap_legacy_queue_input {
+ enum amdgpu_unmap_queues_action action;
+ uint32_t queue_type;
+ uint32_t doorbell_offset;
+ uint32_t pipe_id;
+ uint32_t queue_id;
+ uint64_t trail_fence_addr;
+ uint64_t trail_fence_data;
+};
+
+struct mes_suspend_gang_input {
+ bool suspend_all_gangs;
+ uint64_t gang_context_addr;
+ uint64_t suspend_fence_addr;
+ uint32_t suspend_fence_value;
+};
+
+struct mes_resume_gang_input {
+ bool resume_all_gangs;
+ uint64_t gang_context_addr;
+};
+
+struct mes_reset_queue_input {
+ uint32_t queue_type;
+ uint32_t doorbell_offset;
+ bool use_mmio;
+ uint32_t me_id;
+ uint32_t pipe_id;
+ uint32_t queue_id;
+ uint64_t mqd_addr;
+ uint64_t wptr_addr;
+ uint32_t vmid;
+ bool legacy_gfx;
+ bool is_kq;
+};
+
+struct mes_detect_and_reset_queue_input {
+ uint32_t queue_type;
+ bool detect_only;
+};
+
+struct mes_inv_tlbs_pasid_input {
+ uint32_t xcc_id;
+ uint16_t pasid;
+ uint8_t hub_id;
+ uint8_t flush_type;
+};
+
+enum mes_misc_opcode {
+ MES_MISC_OP_WRITE_REG,
+ MES_MISC_OP_READ_REG,
+ MES_MISC_OP_WRM_REG_WAIT,
+ MES_MISC_OP_WRM_REG_WR_WAIT,
+ MES_MISC_OP_SET_SHADER_DEBUGGER,
+ MES_MISC_OP_CHANGE_CONFIG,
+};
+
+struct mes_misc_op_input {
+ enum mes_misc_opcode op;
+
+ union {
+ struct {
+ uint32_t reg_offset;
+ uint64_t buffer_addr;
+ } read_reg;
+
+ struct {
+ uint32_t reg_offset;
+ uint32_t reg_value;
+ } write_reg;
+
+ struct {
+ uint32_t ref;
+ uint32_t mask;
+ uint32_t reg0;
+ uint32_t reg1;
+ } wrm_reg;
+
+ struct {
+ uint64_t process_context_addr;
+ union {
+ struct {
+ uint32_t single_memop : 1;
+ uint32_t single_alu_op : 1;
+ uint32_t reserved: 29;
+ uint32_t process_ctx_flush: 1;
+ };
+ uint32_t u32all;
+ } flags;
+ uint32_t spi_gdbg_per_vmid_cntl;
+ uint32_t tcp_watch_cntl[4];
+ uint32_t trap_en;
+ } set_shader_debugger;
+
+ struct {
+ union {
+ struct {
+ uint32_t limit_single_process : 1;
+ uint32_t enable_hws_logging_buffer : 1;
+ uint32_t reserved : 30;
+ };
+ uint32_t all;
+ } option;
+ struct {
+ uint32_t tdr_level;
+ uint32_t tdr_delay;
+ } tdr_config;
+ } change_config;
+ };
+};
+
+struct amdgpu_mes_funcs {
+ int (*add_hw_queue)(struct amdgpu_mes *mes,
+ struct mes_add_queue_input *input);
+
+ int (*remove_hw_queue)(struct amdgpu_mes *mes,
+ struct mes_remove_queue_input *input);
+
+ int (*map_legacy_queue)(struct amdgpu_mes *mes,
+ struct mes_map_legacy_queue_input *input);
+
+ int (*unmap_legacy_queue)(struct amdgpu_mes *mes,
+ struct mes_unmap_legacy_queue_input *input);
+
+ int (*suspend_gang)(struct amdgpu_mes *mes,
+ struct mes_suspend_gang_input *input);
+
+ int (*resume_gang)(struct amdgpu_mes *mes,
+ struct mes_resume_gang_input *input);
+
+ int (*misc_op)(struct amdgpu_mes *mes,
+ struct mes_misc_op_input *input);
+
+ int (*reset_hw_queue)(struct amdgpu_mes *mes,
+ struct mes_reset_queue_input *input);
+
+ int (*detect_and_reset_hung_queues)(struct amdgpu_mes *mes,
+ struct mes_detect_and_reset_queue_input *input);
+
+
+ struct mes_inv_tlbs_pasid_input *input);
+};
+
+#define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
+#define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev))
+
+int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
+int amdgpu_mes_init(struct amdgpu_device *adev);
+void amdgpu_mes_fini(struct amdgpu_device *adev);
+
+int amdgpu_mes_suspend(struct amdgpu_device *adev);
+int amdgpu_mes_resume(struct amdgpu_device *adev);
+
+int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ enum amdgpu_unmap_queues_action action,
+ u64 gpu_addr, u64 seq);
+int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ unsigned int vmid,
+ bool use_mmio);
+
+int amdgpu_mes_get_hung_queue_db_array_size(struct amdgpu_device *adev);
+int amdgpu_mes_detect_and_reset_hung_queues(struct amdgpu_device *adev,
+ int queue_type,
+ bool detect_only,
+ unsigned int *hung_db_num,
+ u32 *hung_db_array);
+
+uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
+int amdgpu_mes_wreg(struct amdgpu_device *adev,
+ uint32_t reg, uint32_t val);
+int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask);
+int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
+ uint64_t process_context_addr,
+ uint32_t spi_gdbg_per_vmid_cntl,
+ const uint32_t *tcp_watch_cntl,
+ uint32_t flags,
+ bool trap_en);
+int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
+ uint64_t process_context_addr);
+
+uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
+ enum amdgpu_mes_priority_level prio);
+
+int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev);
+
+/*
+ * MES lock can be taken in MMU notifiers.
+ *
+ * A bit more detail about why to set no-FS reclaim with MES lock:
+ *
+ * The purpose of the MMU notifier is to stop GPU access to memory so
+ * that the Linux VM subsystem can move pages around safely. This is
+ * done by preempting user mode queues for the affected process. When
+ * MES is used, MES lock needs to be taken to preempt the queues.
+ *
+ * The MMU notifier callback entry point in the driver is
+ * amdgpu_mn_invalidate_range_start_hsa. The relevant call chain from
+ * there is:
+ * amdgpu_amdkfd_evict_userptr -> kgd2kfd_quiesce_mm ->
+ * kfd_process_evict_queues -> pdd->dev->dqm->ops.evict_process_queues
+ *
+ * The last part of the chain is a function pointer where we take the
+ * MES lock.
+ *
+ * The problem with taking locks in the MMU notifier is that MMU
+ * notifiers can be called in reclaim-FS context. That's where the
+ * kernel frees up pages to make room for new page allocations under
+ * memory pressure. While we are running in reclaim-FS context, we must
+ * not trigger another memory reclaim operation because that would
+ * recursively reenter the reclaim code and cause a deadlock. The
+ * memalloc_noreclaim_save/restore calls guarantee that.
+ *
+ * In addition we also need to avoid lock dependencies on other locks taken
+ * under the MES lock, for example reservation locks. Here is a possible
+ * scenario of a deadlock:
+ * Thread A: takes and holds reservation lock | triggers reclaim-FS |
+ * MMU notifier | blocks trying to take MES lock
+ * Thread B: takes and holds MES lock | blocks trying to take reservation lock
+ *
+ * In this scenario Thread B gets involved in a deadlock even without
+ * triggering a reclaim-FS operation itself.
+ * To fix this and break the lock dependency chain you'd need to either:
+ * 1. protect reservation locks with memalloc_nofs_save/restore, or
+ * 2. avoid taking reservation locks under the MES lock.
+ *
+ * Reservation locks are taken all over the kernel in different subsystems; we
+ * have no control over them or their lock dependencies. So the only workable
+ * solution is to avoid taking other locks under the MES lock.
+ * As a result, make sure no reclaim-FS happens while holding this lock anywhere
+ * to prevent deadlocks when an MMU notifier runs in reclaim-FS context.
+ */
+static inline void amdgpu_mes_lock(struct amdgpu_mes *mes)
+{
+ mutex_lock(&mes->mutex_hidden);
+ mes->saved_flags = memalloc_noreclaim_save();
+}
+
+static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
+{
+ memalloc_noreclaim_restore(mes->saved_flags);
+ mutex_unlock(&mes->mutex_hidden);
+}
+
+bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
+
+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev);
+
+#endif /* __AMDGPU_MES_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes_ctx.h
new file mode 100644
index 000000000000..912a5be2ece6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes_ctx.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_MES_CTX_H__
+#define __AMDGPU_MES_CTX_H__
+
+#include "v10_structs.h"
+
+enum {
+ AMDGPU_MES_CTX_RPTR_OFFS = 0,
+ AMDGPU_MES_CTX_WPTR_OFFS,
+ AMDGPU_MES_CTX_FENCE_OFFS,
+ AMDGPU_MES_CTX_COND_EXE_OFFS,
+ AMDGPU_MES_CTX_TRAIL_FENCE_OFFS,
+ AMDGPU_MES_CTX_MAX_OFFS,
+};
+
+enum {
+ AMDGPU_MES_CTX_RING_OFFS = AMDGPU_MES_CTX_MAX_OFFS,
+ AMDGPU_MES_CTX_IB_OFFS,
+ AMDGPU_MES_CTX_PADDING_OFFS,
+};
+
+#define AMDGPU_MES_CTX_MAX_GFX_RINGS 1
+#define AMDGPU_MES_CTX_MAX_COMPUTE_RINGS 4
+#define AMDGPU_MES_CTX_MAX_SDMA_RINGS 2
+#define AMDGPU_MES_CTX_MAX_RINGS \
+ (AMDGPU_MES_CTX_MAX_GFX_RINGS + \
+ AMDGPU_MES_CTX_MAX_COMPUTE_RINGS + \
+ AMDGPU_MES_CTX_MAX_SDMA_RINGS)
+
+#define AMDGPU_CSA_SDMA_SIZE 64
+#define GFX10_MEC_HPD_SIZE 2048
+
+struct amdgpu_wb_slot {
+ uint32_t data[8];
+};
+
+struct amdgpu_mes_ctx_meta_data {
+ struct {
+ uint8_t ring[PAGE_SIZE * 4];
+
+ /* gfx csa */
+ struct v10_gfx_meta_data gfx_meta_data;
+
+ uint8_t gds_backup[64 * 1024];
+
+ struct amdgpu_wb_slot slots[AMDGPU_MES_CTX_MAX_OFFS];
+
+ /* only for ib test */
+ uint32_t ib[256] __aligned(256);
+
+ uint32_t padding[64];
+
+ } __aligned(PAGE_SIZE) gfx[AMDGPU_MES_CTX_MAX_GFX_RINGS];
+
+ struct {
+ uint8_t ring[PAGE_SIZE * 4];
+
+ uint8_t mec_hpd[GFX10_MEC_HPD_SIZE];
+
+ struct amdgpu_wb_slot slots[AMDGPU_MES_CTX_MAX_OFFS];
+
+ /* only for ib test */
+ uint32_t ib[256] __aligned(256);
+
+ uint32_t padding[64];
+
+ } __aligned(PAGE_SIZE) compute[AMDGPU_MES_CTX_MAX_COMPUTE_RINGS];
+
+ struct {
+ uint8_t ring[PAGE_SIZE * 4];
+
+ /* sdma csa for mcbp */
+ uint8_t sdma_meta_data[AMDGPU_CSA_SDMA_SIZE];
+
+ struct amdgpu_wb_slot slots[AMDGPU_MES_CTX_MAX_OFFS];
+
+ /* only for ib test */
+ uint32_t ib[256] __aligned(256);
+
+ uint32_t padding[64];
+
+ } __aligned(PAGE_SIZE) sdma[AMDGPU_MES_CTX_MAX_SDMA_RINGS];
+};
+
+struct amdgpu_mes_ctx_data {
+ struct amdgpu_bo *meta_data_obj;
+ uint64_t meta_data_gpu_addr;
+ uint64_t meta_data_mc_addr;
+ struct amdgpu_bo_va *meta_data_va;
+ void *meta_data_ptr;
+ uint32_t gang_ids[AMDGPU_HW_IP_DMA+1];
+};
+
+#define AMDGPU_FENCE_MES_QUEUE_FLAG 0x1000000u
+#define AMDGPU_FENCE_MES_QUEUE_ID_MASK (AMDGPU_FENCE_MES_QUEUE_FLAG - 1)
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
new file mode 100644
index 000000000000..0f6b1021fef3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_mmhub_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_mmhub_ras *ras;
+
+ if (!adev->mmhub.ras)
+ return 0;
+
+ ras = adev->mmhub.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register mmhub ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "mmhub");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__MMHUB;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->mmhub.ras_if = &ras->ras_block.ras_comm;
+
+ /* mmhub ras follows amdgpu_ras_block_late_init_default for late init */
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
new file mode 100644
index 000000000000..1ca9d4ed8063
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_MMHUB_H__
+#define __AMDGPU_MMHUB_H__
+
+enum amdgpu_mmhub_ras_memory_id {
+ AMDGPU_MMHUB_WGMI_PAGEMEM = 0,
+ AMDGPU_MMHUB_RGMI_PAGEMEM = 1,
+ AMDGPU_MMHUB_WDRAM_PAGEMEM = 2,
+ AMDGPU_MMHUB_RDRAM_PAGEMEM = 3,
+ AMDGPU_MMHUB_WIO_CMDMEM = 4,
+ AMDGPU_MMHUB_RIO_CMDMEM = 5,
+ AMDGPU_MMHUB_WGMI_CMDMEM = 6,
+ AMDGPU_MMHUB_RGMI_CMDMEM = 7,
+ AMDGPU_MMHUB_WDRAM_CMDMEM = 8,
+ AMDGPU_MMHUB_RDRAM_CMDMEM = 9,
+ AMDGPU_MMHUB_MAM_DMEM0 = 10,
+ AMDGPU_MMHUB_MAM_DMEM1 = 11,
+ AMDGPU_MMHUB_MAM_DMEM2 = 12,
+ AMDGPU_MMHUB_MAM_DMEM3 = 13,
+ AMDGPU_MMHUB_WRET_TAGMEM = 19,
+ AMDGPU_MMHUB_RRET_TAGMEM = 20,
+ AMDGPU_MMHUB_WIO_DATAMEM = 21,
+ AMDGPU_MMHUB_WGMI_DATAMEM = 22,
+ AMDGPU_MMHUB_WDRAM_DATAMEM = 23,
+ AMDGPU_MMHUB_MEMORY_BLOCK_LAST,
+};
+
+struct amdgpu_mmhub_ras {
+ struct amdgpu_ras_block_object ras_block;
+};
+
+struct amdgpu_mmhub_funcs {
+ u64 (*get_fb_location)(struct amdgpu_device *adev);
+ u64 (*get_mc_fb_offset)(struct amdgpu_device *adev);
+ void (*init)(struct amdgpu_device *adev);
+ int (*gart_enable)(struct amdgpu_device *adev);
+ void (*set_fault_enable_default)(struct amdgpu_device *adev,
+ bool value);
+ void (*gart_disable)(struct amdgpu_device *adev);
+ int (*set_clockgating)(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+ void (*get_clockgating)(struct amdgpu_device *adev, u64 *flags);
+ void (*setup_vm_pt_regs)(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base);
+ void (*update_power_gating)(struct amdgpu_device *adev,
+ bool enable);
+};
+
+struct amdgpu_mmhub {
+ struct ras_common_if *ras_if;
+ const struct amdgpu_mmhub_funcs *funcs;
+ struct amdgpu_mmhub_ras *ras;
+};
+
+int amdgpu_mmhub_ras_sw_init(struct amdgpu_device *adev);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
deleted file mode 100644
index 38f739fb727b..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ /dev/null
@@ -1,373 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- */
-/*
- * Authors:
- * Christian König <christian.koenig@amd.com>
- */
-
-#include <linux/firmware.h>
-#include <linux/module.h>
-#include <linux/mmu_notifier.h>
-#include <linux/interval_tree.h>
-#include <drm/drmP.h>
-#include <drm/drm.h>
-
-#include "amdgpu.h"
-
-struct amdgpu_mn {
- /* constant after initialisation */
- struct amdgpu_device *adev;
- struct mm_struct *mm;
- struct mmu_notifier mn;
-
- /* only used on destruction */
- struct work_struct work;
-
- /* protected by adev->mn_lock */
- struct hlist_node node;
-
- /* objects protected by lock */
- struct mutex lock;
- struct rb_root objects;
-};
-
-struct amdgpu_mn_node {
- struct interval_tree_node it;
- struct list_head bos;
-};
-
-/**
- * amdgpu_mn_destroy - destroy the rmn
- *
- * @work: previously sheduled work item
- *
- * Lazy destroys the notifier from a work item
- */
-static void amdgpu_mn_destroy(struct work_struct *work)
-{
- struct amdgpu_mn *rmn = container_of(work, struct amdgpu_mn, work);
- struct amdgpu_device *adev = rmn->adev;
- struct amdgpu_mn_node *node, *next_node;
- struct amdgpu_bo *bo, *next_bo;
-
- mutex_lock(&adev->mn_lock);
- mutex_lock(&rmn->lock);
- hash_del(&rmn->node);
- rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
- it.rb) {
- list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
- bo->mn = NULL;
- list_del_init(&bo->mn_list);
- }
- kfree(node);
- }
- mutex_unlock(&rmn->lock);
- mutex_unlock(&adev->mn_lock);
- mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
- kfree(rmn);
-}
-
-/**
- * amdgpu_mn_release - callback to notify about mm destruction
- *
- * @mn: our notifier
- * @mn: the mm this callback is about
- *
- * Shedule a work item to lazy destroy our notifier.
- */
-static void amdgpu_mn_release(struct mmu_notifier *mn,
- struct mm_struct *mm)
-{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
- INIT_WORK(&rmn->work, amdgpu_mn_destroy);
- schedule_work(&rmn->work);
-}
-
-/**
- * amdgpu_mn_invalidate_node - unmap all BOs of a node
- *
- * @node: the node with the BOs to unmap
- *
- * We block for all BOs and unmap them by move them
- * into system domain again.
- */
-static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
- unsigned long start,
- unsigned long end)
-{
- struct amdgpu_bo *bo;
- long r;
-
- list_for_each_entry(bo, &node->bos, mn_list) {
-
- if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
- continue;
-
- r = amdgpu_bo_reserve(bo, true);
- if (r) {
- DRM_ERROR("(%ld) failed to reserve user bo\n", r);
- continue;
- }
-
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
- true, false, MAX_SCHEDULE_TIMEOUT);
- if (r <= 0)
- DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (r)
- DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
- amdgpu_bo_unreserve(bo);
- }
-}
-
-/**
- * amdgpu_mn_invalidate_page - callback to notify about mm change
- *
- * @mn: our notifier
- * @mn: the mm this callback is about
- * @address: address of invalidate page
- *
- * Invalidation of a single page. Blocks for all BOs mapping it
- * and unmap them by move them into system domain again.
- */
-static void amdgpu_mn_invalidate_page(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long address)
-{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
- struct interval_tree_node *it;
-
- mutex_lock(&rmn->lock);
-
- it = interval_tree_iter_first(&rmn->objects, address, address);
- if (it) {
- struct amdgpu_mn_node *node;
-
- node = container_of(it, struct amdgpu_mn_node, it);
- amdgpu_mn_invalidate_node(node, address, address);
- }
-
- mutex_unlock(&rmn->lock);
-}
-
-/**
- * amdgpu_mn_invalidate_range_start - callback to notify about mm change
- *
- * @mn: our notifier
- * @mn: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
- *
- * We block for all BOs between start and end to be idle and
- * unmap them by move them into system domain again.
- */
-static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end)
-{
- struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
- struct interval_tree_node *it;
-
- /* notification is exclusive, but interval is inclusive */
- end -= 1;
-
- mutex_lock(&rmn->lock);
-
- it = interval_tree_iter_first(&rmn->objects, start, end);
- while (it) {
- struct amdgpu_mn_node *node;
-
- node = container_of(it, struct amdgpu_mn_node, it);
- it = interval_tree_iter_next(it, start, end);
-
- amdgpu_mn_invalidate_node(node, start, end);
- }
-
- mutex_unlock(&rmn->lock);
-}
-
-static const struct mmu_notifier_ops amdgpu_mn_ops = {
- .release = amdgpu_mn_release,
- .invalidate_page = amdgpu_mn_invalidate_page,
- .invalidate_range_start = amdgpu_mn_invalidate_range_start,
-};
-
-/**
- * amdgpu_mn_get - create notifier context
- *
- * @adev: amdgpu device pointer
- *
- * Creates a notifier context for current->mm.
- */
-static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
-{
- struct mm_struct *mm = current->mm;
- struct amdgpu_mn *rmn;
- int r;
-
- mutex_lock(&adev->mn_lock);
- if (down_write_killable(&mm->mmap_sem)) {
- mutex_unlock(&adev->mn_lock);
- return ERR_PTR(-EINTR);
- }
-
- hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
- if (rmn->mm == mm)
- goto release_locks;
-
- rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
- if (!rmn) {
- rmn = ERR_PTR(-ENOMEM);
- goto release_locks;
- }
-
- rmn->adev = adev;
- rmn->mm = mm;
- rmn->mn.ops = &amdgpu_mn_ops;
- mutex_init(&rmn->lock);
- rmn->objects = RB_ROOT;
-
- r = __mmu_notifier_register(&rmn->mn, mm);
- if (r)
- goto free_rmn;
-
- hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);
-
-release_locks:
- up_write(&mm->mmap_sem);
- mutex_unlock(&adev->mn_lock);
-
- return rmn;
-
-free_rmn:
- up_write(&mm->mmap_sem);
- mutex_unlock(&adev->mn_lock);
- kfree(rmn);
-
- return ERR_PTR(r);
-}
-
-/**
- * amdgpu_mn_register - register a BO for notifier updates
- *
- * @bo: amdgpu buffer object
- * @addr: userptr addr we should monitor
- *
- * Registers an MMU notifier for the given BO at the specified address.
- * Returns 0 on success, -ERRNO if anything goes wrong.
- */
-int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
-{
- unsigned long end = addr + amdgpu_bo_size(bo) - 1;
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_mn *rmn;
- struct amdgpu_mn_node *node = NULL;
- struct list_head bos;
- struct interval_tree_node *it;
-
- rmn = amdgpu_mn_get(adev);
- if (IS_ERR(rmn))
- return PTR_ERR(rmn);
-
- INIT_LIST_HEAD(&bos);
-
- mutex_lock(&rmn->lock);
-
- while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
- kfree(node);
- node = container_of(it, struct amdgpu_mn_node, it);
- interval_tree_remove(&node->it, &rmn->objects);
- addr = min(it->start, addr);
- end = max(it->last, end);
- list_splice(&node->bos, &bos);
- }
-
- if (!node) {
- node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
- if (!node) {
- mutex_unlock(&rmn->lock);
- return -ENOMEM;
- }
- }
-
- bo->mn = rmn;
-
- node->it.start = addr;
- node->it.last = end;
- INIT_LIST_HEAD(&node->bos);
- list_splice(&bos, &node->bos);
- list_add(&bo->mn_list, &node->bos);
-
- interval_tree_insert(&node->it, &rmn->objects);
-
- mutex_unlock(&rmn->lock);
-
- return 0;
-}
-
-/**
- * amdgpu_mn_unregister - unregister a BO for notifier updates
- *
- * @bo: amdgpu buffer object
- *
- * Remove any registration of MMU notifier updates from the buffer object.
- */
-void amdgpu_mn_unregister(struct amdgpu_bo *bo)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct amdgpu_mn *rmn;
- struct list_head *head;
-
- mutex_lock(&adev->mn_lock);
-
- rmn = bo->mn;
- if (rmn == NULL) {
- mutex_unlock(&adev->mn_lock);
- return;
- }
-
- mutex_lock(&rmn->lock);
-
- /* save the next list entry for later */
- head = bo->mn_list.next;
-
- bo->mn = NULL;
- list_del(&bo->mn_list);
-
- if (list_empty(head)) {
- struct amdgpu_mn_node *node;
- node = container_of(head, struct amdgpu_mn_node, bos);
- interval_tree_remove(&node->it, &rmn->objects);
- kfree(node);
- }
-
- mutex_unlock(&rmn->lock);
- mutex_unlock(&adev->mn_lock);
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index dbd10618ec20..20460cfd09bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -30,32 +30,39 @@
#ifndef AMDGPU_MODE_H
#define AMDGPU_MODE_H
+#include <drm/display/drm_dp_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
-#include <drm/drm_dp_helper.h>
#include <drm/drm_fixed.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_probe_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/hrtimer.h>
#include "amdgpu_irq.h"
+#include <drm/display/drm_dp_mst_helper.h>
+#include "modules/inc/mod_freesync.h"
+#include "amdgpu_dm_irq_params.h"
+
struct amdgpu_bo;
struct amdgpu_device;
struct amdgpu_encoder;
struct amdgpu_router;
struct amdgpu_hpd;
+struct edid;
+struct drm_edid;
#define to_amdgpu_crtc(x) container_of(x, struct amdgpu_crtc, base)
#define to_amdgpu_connector(x) container_of(x, struct amdgpu_connector, base)
#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
+#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base)
+
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
+#define AMDGPU_MAX_PLANES 6
#define AMDGPU_MAX_AFMT_BLOCKS 9
enum amdgpu_rmx_type {
@@ -81,7 +88,6 @@ enum amdgpu_hpd_id {
AMDGPU_HPD_4,
AMDGPU_HPD_5,
AMDGPU_HPD_6,
- AMDGPU_HPD_LAST,
AMDGPU_HPD_NONE = 0xff,
};
@@ -98,7 +104,6 @@ enum amdgpu_crtc_irq {
AMDGPU_CRTC_IRQ_VLINE4,
AMDGPU_CRTC_IRQ_VLINE5,
AMDGPU_CRTC_IRQ_VLINE6,
- AMDGPU_CRTC_IRQ_LAST,
AMDGPU_CRTC_IRQ_NONE = 0xff
};
@@ -109,7 +114,6 @@ enum amdgpu_pageflip_irq {
AMDGPU_PAGEFLIP_IRQ_D4,
AMDGPU_PAGEFLIP_IRQ_D5,
AMDGPU_PAGEFLIP_IRQ_D6,
- AMDGPU_PAGEFLIP_IRQ_LAST,
AMDGPU_PAGEFLIP_IRQ_NONE = 0xff
};
@@ -227,8 +231,6 @@ struct amdgpu_i2c_chan {
struct mutex mutex;
};
-struct amdgpu_fbdev;
-
struct amdgpu_afmt {
bool enabled;
int offset;
@@ -257,21 +259,11 @@ struct amdgpu_audio {
int num_pins;
};
-struct amdgpu_mode_mc_save {
- u32 vga_render_control;
- u32 vga_hdp_control;
- bool crtc_enabled[AMDGPU_MAX_CRTCS];
-};
-
struct amdgpu_display_funcs {
- /* vga render */
- void (*set_vga_render_state)(struct amdgpu_device *adev, bool render);
/* display watermarks */
void (*bandwidth_update)(struct amdgpu_device *adev);
/* get frame count */
u32 (*vblank_get_counter)(struct amdgpu_device *adev, int crtc);
- /* wait for vblank */
- void (*vblank_wait)(struct amdgpu_device *adev, int crtc);
/* set backlight level */
void (*backlight_set_level)(struct amdgpu_encoder *amdgpu_encoder,
u8 level);
@@ -300,10 +292,19 @@ struct amdgpu_display_funcs {
uint16_t connector_object_id,
struct amdgpu_hpd *hpd,
struct amdgpu_router *router);
- void (*stop_mc_access)(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
- void (*resume_mc_access)(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save);
+
+};
+
+struct amdgpu_framebuffer {
+ struct drm_framebuffer base;
+
+ uint64_t tiling_flags;
+ bool tmz_surface;
+ bool gfx12_dcc;
+
+ /* caching for later use */
+ uint64_t address;
};
struct amdgpu_mode_info {
@@ -311,6 +312,7 @@ struct amdgpu_mode_info {
struct card_info *atom_card_info;
bool mode_config_initialized;
struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
+ struct drm_plane *planes[AMDGPU_MAX_PLANES];
struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
@@ -325,34 +327,121 @@ struct amdgpu_mode_info {
/* FMT dithering */
struct drm_property *dither_property;
/* hardcoded DFP edid from BIOS */
- struct edid *bios_hardcoded_edid;
- int bios_hardcoded_edid_size;
+ const struct drm_edid *bios_hardcoded_edid;
- /* pointer to fbdev info structure */
- struct amdgpu_fbdev *rfbdev;
/* firmware flags */
- u16 firmware_flags;
+ u32 firmware_flags;
/* pointer to backlight encoder */
struct amdgpu_encoder *bl_encoder;
+ u8 bl_level; /* saved backlight level */
struct amdgpu_audio audio; /* audio stuff */
int num_crtc; /* number of crtcs */
int num_hpd; /* number of hpd pins */
int num_dig; /* number of dig blocks */
+ bool gpu_vm_support; /* supports display from GTT */
int disp_priority;
const struct amdgpu_display_funcs *funcs;
+ const enum drm_plane_type *plane_type;
+
+ /* Driver-private color mgmt props */
+
+ /* @plane_degamma_lut_property: Plane property to set a degamma LUT to
+ * convert encoded values to light linear values before sampling or
+ * blending.
+ */
+ struct drm_property *plane_degamma_lut_property;
+ /* @plane_degamma_lut_size_property: Plane property to define the max
+ * size of degamma LUT as supported by the driver (read-only).
+ */
+ struct drm_property *plane_degamma_lut_size_property;
+ /**
+ * @plane_degamma_tf_property: Plane pre-defined transfer function to
+ * go from scanout/encoded values to linear values.
+ */
+ struct drm_property *plane_degamma_tf_property;
+ /**
+ * @plane_hdr_mult_property:
+ */
+ struct drm_property *plane_hdr_mult_property;
+
+ struct drm_property *plane_ctm_property;
+ /**
+ * @plane_shaper_lut_property: Plane property to set pre-blending shaper LUT
+ * that converts color content before 3D LUT. If
+ * plane_shaper_tf_property != Identity TF, AMD color module will
+ * combine the user LUT values with pre-defined TF into the LUT
+ * parameters to be programmed.
+ */
+ struct drm_property *plane_shaper_lut_property;
+ /**
+ * @plane_shaper_lut_size_property: Plane property for the size of
+ * pre-blending shaper LUT as supported by the driver (read-only).
+ */
+ struct drm_property *plane_shaper_lut_size_property;
+ /**
+ * @plane_shaper_tf_property: Plane property to set a predefined
+ * transfer function for pre-blending shaper (before applying 3D LUT)
+ * with or without LUT. There is no shaper ROM, but we can use AMD
+ * color modules to program LUT parameters from predefined TF (or
+ * from a combination of pre-defined TF and the custom 1D LUT).
+ */
+ struct drm_property *plane_shaper_tf_property;
+ /**
+ * @plane_lut3d_property: Plane property for color transformation using
+ * a 3D LUT (pre-blending), a three-dimensional array where each
+ * element is an RGB triplet. Each dimension has the size of
+ * lut3d_size. The array contains samples from the approximated
+ * function. On AMD, values between samples are estimated by
+ * tetrahedral interpolation. The array is accessed with three indices,
+ * one for each input dimension (color channel), blue being the
+ * outermost dimension, red the innermost.
+ */
+ struct drm_property *plane_lut3d_property;
+ /**
+ * @plane_lut3d_size_property: Plane property to define the max
+ * size of 3D LUT as supported by the driver (read-only). The max size
+ * is the max size of one dimension and, therefore, the max number of
+ * entries for 3D LUT array is the 3D LUT size cubed.
+ */
+ struct drm_property *plane_lut3d_size_property;
+ /**
+ * @plane_blend_lut_property: Plane property for output gamma before
+ * blending. Userspace sets a blend LUT to convert colors after 3D LUT
+ * conversion. It works as a post-3DLUT 1D LUT. Together with the shaper
+ * LUT, it sandwiches the 3D LUT between two 1D LUTs. If plane_blend_tf_property
+ * != Identity TF, AMD color module will combine the user LUT values
+ * with pre-defined TF into the LUT parameters to be programmed.
+ */
+ struct drm_property *plane_blend_lut_property;
+ /**
+ * @plane_blend_lut_size_property: Plane property to define the max
+ * size of blend LUT as supported by the driver (read-only).
+ */
+ struct drm_property *plane_blend_lut_size_property;
+ /**
+ * @plane_blend_tf_property: Plane property to set a predefined
+ * transfer function for pre-blending blend/out_gamma (after applying
+ * 3D LUT) with or without LUT. There is no blend ROM, but we can use
+ * AMD color modules to program LUT parameters from predefined TF (or
+ * from a combination of pre-defined TF and the custom 1D LUT).
+ */
+ struct drm_property *plane_blend_tf_property;
+ /* @regamma_tf_property: Transfer function for CRTC regamma
+ * (post-blending). Possible values are defined by `enum
+ * amdgpu_transfer_function`. There is no regamma ROM, but we can use
+ * AMD color modules to program LUT parameters from predefined TF (or
+ * from a combination of pre-defined TF and the custom 1D LUT).
+ */
+ struct drm_property *regamma_tf_property;
};
#define AMDGPU_MAX_BL_LEVEL 0xFF
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-
struct amdgpu_backlight_privdata {
struct amdgpu_encoder *encoder;
uint8_t negative;
};
-#endif
-
struct amdgpu_atom_ss {
uint16_t percentage;
uint16_t percentage_divider;
@@ -369,7 +458,6 @@ struct amdgpu_atom_ss {
struct amdgpu_crtc {
struct drm_crtc base;
int crtc_id;
- u16 lut_r[256], lut_g[256], lut_b[256];
bool enabled;
bool can_tile;
uint32_t crtc_offset;
@@ -394,6 +482,8 @@ struct amdgpu_crtc {
struct amdgpu_flip_work *pflip_works;
enum amdgpu_flip_status pflip_status;
int deferred_flip_completion;
+ /* parameters access from DM IRQ handler */
+ struct dm_irq_params dm_irq_params;
/* pll sharing */
struct amdgpu_atom_ss ss;
bool ss_enabled;
@@ -406,13 +496,18 @@ struct amdgpu_crtc {
struct drm_connector *connector;
/* for dpm */
u32 line_time;
- u32 wm_low;
- u32 wm_high;
u32 lb_vblank_lead_lines;
struct drm_display_mode hw_mode;
/* for virtual dce */
struct hrtimer vblank_timer;
enum amdgpu_interrupt_state vsync_timer_enabled;
+
+ int otg_inst;
+ struct drm_pending_vblank_event *event;
+
+ bool wb_pending;
+ bool wb_enabled;
+ struct drm_writeback_connector *wb_conn;
};
struct amdgpu_encoder_atom_dig {
@@ -455,6 +550,7 @@ struct amdgpu_encoder {
struct amdgpu_connector_atom_dig {
/* displayport */
u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
u8 dp_sink_type;
int dp_clock;
int dp_lane_count;
@@ -502,6 +598,20 @@ enum amdgpu_connector_dither {
AMDGPU_FMT_DITHER_ENABLE = 1,
};
+struct amdgpu_dm_dp_aux {
+ struct drm_dp_aux aux;
+ struct ddc_service *ddc_service;
+};
+
+struct amdgpu_i2c_adapter {
+ struct i2c_adapter base;
+
+ struct ddc_service *ddc_service;
+ bool oem;
+};
+
+#define TO_DM_AUX(x) container_of((x), struct amdgpu_dm_dp_aux, aux)
+
struct amdgpu_connector {
struct drm_connector base;
uint32_t connector_id;
@@ -516,6 +626,7 @@ struct amdgpu_connector {
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
+ bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */
uint16_t connector_object_id;
struct amdgpu_hpd hpd;
struct amdgpu_router router;
@@ -525,15 +636,25 @@ struct amdgpu_connector {
unsigned pixelclock_for_modeset;
};
-struct amdgpu_framebuffer {
- struct drm_framebuffer base;
- struct drm_gem_object *obj;
+/* TODO: start to use this struct and remove same field from base one */
+struct amdgpu_mst_connector {
+ struct amdgpu_connector base;
+
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *mst_output_port;
+ struct amdgpu_connector *mst_root;
+ bool is_mst_connector;
+ struct amdgpu_encoder *mst_encoder;
};
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
((em) == ATOM_ENCODER_MODE_DP_MST))
-/* Driver internal use only flags of amdgpu_get_crtc_scanoutpos() */
+/* Driver internal use only flags of amdgpu_display_get_crtc_scanoutpos() */
+#define DRM_SCANOUTPOS_VALID (1 << 0)
+#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1)
+#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
#define USE_REAL_VBLANKSTART (1 << 30)
#define GET_DISTANCE_TO_VBLANKSTART (1 << 31)
@@ -549,54 +670,42 @@ bool amdgpu_dig_monitor_is_duallink(struct drm_encoder *encoder,
u16 amdgpu_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder);
-bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector, bool use_aux);
+bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
+ bool use_aux);
void amdgpu_encoder_set_active_device(struct drm_encoder *encoder);
-int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- unsigned int flags, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
-
-int amdgpu_framebuffer_init(struct drm_device *dev,
- struct amdgpu_framebuffer *rfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj);
+int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
+ unsigned int pipe, unsigned int flags, int *vpos,
+ int *hpos, ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
int amdgpufb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
void amdgpu_enc_destroy(struct drm_encoder *encoder);
void amdgpu_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
-bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
+bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
-int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc);
-
-/* fbdev layer */
-int amdgpu_fbdev_init(struct amdgpu_device *adev);
-void amdgpu_fbdev_fini(struct amdgpu_device *adev);
-void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state);
-int amdgpu_fbdev_total_size(struct amdgpu_device *adev);
-bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj);
-void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev);
-
-void amdgpu_fb_output_poll_changed(struct amdgpu_device *adev);
-
+int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc);
-int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled);
+bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos,
+ int *hpos, ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
/* amdgpu_display.c */
-void amdgpu_print_display_setup(struct drm_device *dev);
-int amdgpu_modeset_create_props(struct amdgpu_device *adev);
-int amdgpu_crtc_set_config(struct drm_mode_set *set,
- struct drm_modeset_acquire_ctx *ctx);
-int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags, uint32_t target,
- struct drm_modeset_acquire_ctx *ctx);
+void amdgpu_display_print_display_setup(struct drm_device *dev);
+int amdgpu_display_modeset_create_props(struct amdgpu_device *adev);
+int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
+ struct drm_modeset_acquire_ctx *ctx);
+int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags, uint32_t target,
+ struct drm_modeset_acquire_ctx *ctx);
extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
new file mode 100644
index 000000000000..a974265837f0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+
+int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_nbio_ras *ras;
+
+ if (!adev->nbio.ras)
+ return 0;
+
+ ras = adev->nbio.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register pcie_bif ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "pcie_bif");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__PCIE_BIF;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->nbio.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
+
+u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev)
+{
+ if (adev->nbio.funcs && adev->nbio.funcs->get_pcie_replay_count)
+ return adev->nbio.funcs->get_pcie_replay_count(adev);
+
+ return 0;
+}
+
+bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev)
+{
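+ /* replay counters are reported on bare metal only and need both the ASIC and NBIO callbacks */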
+ if (amdgpu_sriov_vf(adev) || !adev->asic_funcs ||
+ !adev->asic_funcs->get_pcie_replay_count ||
+ (!adev->nbio.funcs || !adev->nbio.funcs->get_pcie_replay_count))
+ return false;
+
+ return true;
+}
+
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ if (amdgpu_ras_is_supported(adev, ras_block->block)) {
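+ /* the RAS controller and err-event-athub interrupts are only needed when RAS is supported on this block */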
+ r = amdgpu_irq_get(adev, &adev->nbio.ras_controller_irq, 0);
+ if (r)
+ goto late_fini;
+ r = amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
+ return 0;
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
new file mode 100644
index 000000000000..b528de6a01f6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_NBIO_H__
+#define __AMDGPU_NBIO_H__
+
+/*
+ * amdgpu nbio functions
+ */
+struct nbio_hdp_flush_reg {
+ u32 ref_and_mask_cp0;
+ u32 ref_and_mask_cp1;
+ u32 ref_and_mask_cp2;
+ u32 ref_and_mask_cp3;
+ u32 ref_and_mask_cp4;
+ u32 ref_and_mask_cp5;
+ u32 ref_and_mask_cp6;
+ u32 ref_and_mask_cp7;
+ u32 ref_and_mask_cp8;
+ u32 ref_and_mask_cp9;
+ u32 ref_and_mask_sdma0;
+ u32 ref_and_mask_sdma1;
+ u32 ref_and_mask_sdma2;
+ u32 ref_and_mask_sdma3;
+ u32 ref_and_mask_sdma4;
+ u32 ref_and_mask_sdma5;
+ u32 ref_and_mask_sdma6;
+ u32 ref_and_mask_sdma7;
+};
+
+struct amdgpu_nbio_ras {
+ struct amdgpu_ras_block_object ras_block;
+ void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
+ void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
+ int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
+ int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_nbio_funcs {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
+ u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_index_hi_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_port_index_offset)(struct amdgpu_device *adev);
+ u32 (*get_pcie_port_data_offset)(struct amdgpu_device *adev);
+ u32 (*get_rev_id)(struct amdgpu_device *adev);
+ void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
+ u32 (*get_memsize)(struct amdgpu_device *adev);
+ void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index, int doorbell_size);
+ void (*vpe_doorbell_range)(struct amdgpu_device *adev, int instance,
+ bool use_doorbell, int doorbell_index, int doorbell_size);
+ void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
+ int doorbell_index, int instance);
+ void (*gc_doorbell_init)(struct amdgpu_device *adev);
+ void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
+ bool enable);
+ void (*ih_doorbell_range)(struct amdgpu_device *adev,
+ bool use_doorbell, int doorbell_index);
+ void (*enable_doorbell_interrupt)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
+ bool enable);
+ void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
+ bool enable);
+ void (*get_clockgating_state)(struct amdgpu_device *adev,
+ u64 *flags);
+ void (*ih_control)(struct amdgpu_device *adev);
+ void (*init_registers)(struct amdgpu_device *adev);
+ void (*remap_hdp_registers)(struct amdgpu_device *adev);
+ void (*enable_aspm)(struct amdgpu_device *adev,
+ bool enable);
+ void (*program_aspm)(struct amdgpu_device *adev);
+ void (*apply_lc_spc_mode_wa)(struct amdgpu_device *adev);
+ void (*apply_l1_link_width_reconfig_wa)(struct amdgpu_device *adev);
+ void (*clear_doorbell_interrupt)(struct amdgpu_device *adev);
+ u32 (*get_rom_offset)(struct amdgpu_device *adev);
+ int (*get_compute_partition_mode)(struct amdgpu_device *adev);
+ u32 (*get_memory_partition_mode)(struct amdgpu_device *adev,
+ u32 *supp_modes);
+ bool (*is_nps_switch_requested)(struct amdgpu_device *adev);
+ u64 (*get_pcie_replay_count)(struct amdgpu_device *adev);
+ void (*set_reg_remap)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_nbio {
+ const struct nbio_hdp_flush_reg *hdp_flush_reg;
+ struct amdgpu_irq_src ras_controller_irq;
+ struct amdgpu_irq_src ras_err_event_athub_irq;
+ struct ras_common_if *ras_if;
+ const struct amdgpu_nbio_funcs *funcs;
+ struct amdgpu_nbio_ras *ras;
+};
+
+int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
+u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev);
+
+bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev);
+
+#endif
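
In practice each NBIO IP version supplies its own amdgpu_nbio_funcs table and assigns it to adev->nbio.funcs during early init. A hypothetical wiring sketch (the nbio_vX_Y_* names are placeholders, not symbols introduced by this patch):

static u32 nbio_vX_Y_get_rev_id(struct amdgpu_device *adev)
{
        return 0;       /* a real implementation reads the revision register */
}

static const struct amdgpu_nbio_funcs nbio_vX_Y_funcs = {
        .get_rev_id = nbio_vX_Y_get_rev_id,
        /* remaining callbacks filled in by the real implementation */
};

/* during early init: adev->nbio.funcs = &nbio_vX_Y_funcs; */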
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 365883d7948d..e08f58de4b17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -31,191 +31,294 @@
*/
#include <linux/list.h>
#include <linux/slab.h>
-#include <drm/drmP.h>
+#include <linux/dma-buf.h>
+#include <linux/export.h>
+
+#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_vram_mgr.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_dma_buf.h"
+/**
+ * DOC: amdgpu_object
+ *
+ * This defines the interfaces to operate on an &amdgpu_bo buffer object which
+ * represents memory used by the driver (VRAM, system memory, etc.). The driver
+ * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
+ * to create/destroy/set buffer objects, which are then managed by the kernel
+ * TTM memory manager.
+ * The interfaces are also used internally by kernel clients, including gfx,
+ * uvd, etc., for kernel-managed allocations used by the GPU.
+ *
+ */
-
-static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
- struct ttm_mem_reg *mem)
-{
- if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
- return 0;
-
- return ((mem->start << PAGE_SHIFT) + mem->size) >
- adev->mc.visible_vram_size ?
- adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
- mem->size;
-}
-
-static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
- struct ttm_mem_reg *old_mem,
- struct ttm_mem_reg *new_mem)
+static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
- u64 vis_size;
- if (!adev)
- return;
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
- if (new_mem) {
- switch (new_mem->mem_type) {
- case TTM_PL_TT:
- atomic64_add(new_mem->size, &adev->gtt_usage);
- break;
- case TTM_PL_VRAM:
- atomic64_add(new_mem->size, &adev->vram_usage);
- vis_size = amdgpu_get_vis_part_size(adev, new_mem);
- atomic64_add(vis_size, &adev->vram_vis_usage);
- break;
- }
- }
+ amdgpu_bo_kunmap(bo);
- if (old_mem) {
- switch (old_mem->mem_type) {
- case TTM_PL_TT:
- atomic64_sub(old_mem->size, &adev->gtt_usage);
- break;
- case TTM_PL_VRAM:
- atomic64_sub(old_mem->size, &adev->vram_usage);
- vis_size = amdgpu_get_vis_part_size(adev, old_mem);
- atomic64_sub(vis_size, &adev->vram_vis_usage);
- break;
- }
- }
+ if (drm_gem_is_imported(&bo->tbo.base))
+ drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+ drm_gem_object_release(&bo->tbo.base);
+ amdgpu_bo_unref(&bo->parent);
+ kvfree(bo);
}
-static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
- struct amdgpu_bo *bo;
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ struct amdgpu_bo_user *ubo;
- bo = container_of(tbo, struct amdgpu_bo, tbo);
-
- amdgpu_update_memory_usage(adev, &bo->tbo.mem, NULL);
-
- drm_gem_object_release(&bo->gem_base);
- amdgpu_bo_unref(&bo->parent);
- if (!list_empty(&bo->shadow_list)) {
- mutex_lock(&adev->shadow_list_lock);
- list_del_init(&bo->shadow_list);
- mutex_unlock(&adev->shadow_list_lock);
- }
- kfree(bo->metadata);
- kfree(bo);
+ ubo = to_amdgpu_bo_user(bo);
+ kfree(ubo->metadata);
+ amdgpu_bo_destroy(tbo);
}
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
+/**
+ * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
+ * @bo: buffer object to be checked
+ *
+ * Uses the destroy function associated with the object to determine if this is
+ * an &amdgpu_bo.
+ *
+ * Returns:
+ * true if the object belongs to &amdgpu_bo, false if not.
+ */
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
- if (bo->destroy == &amdgpu_ttm_bo_destroy)
+ if (bo->destroy == &amdgpu_bo_destroy ||
+ bo->destroy == &amdgpu_bo_user_destroy)
return true;
+
return false;
}
-static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
- struct ttm_placement *placement,
- struct ttm_place *places,
- u32 domain, u64 flags)
+/**
+ * amdgpu_bo_placement_from_domain - set buffer's placement
+ * @abo: &amdgpu_bo buffer object whose placement is to be set
+ * @domain: requested domain
+ *
+ * Sets buffer's placement according to requested domain and the buffer's
+ * flags.
+ */
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ struct ttm_placement *placement = &abo->placement;
+ struct ttm_place *places = abo->placements;
+ u64 flags = abo->flags;
u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
- unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
-
- places[c].fpfn = 0;
- places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
+ unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
+ int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
+
+ if (adev->gmc.mem_partitions && mem_id >= 0) {
+ places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn;
+ /*
+ * memory partition range lpfn is inclusive start + size - 1
+ * TTM place lpfn is exclusive start + size
+ */
+ places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1;
+ } else {
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ }
+ places[c].mem_type = TTM_PL_VRAM;
+ places[c].flags = 0;
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
- places[c].lpfn = visible_pfn;
+ places[c].lpfn = min_not_zero(places[c].lpfn, visible_pfn);
else
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
- if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ if (abo->tbo.type == ttm_bo_type_kernel &&
+ flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
+
+ c++;
+ }
+
+ if (domain & AMDGPU_GEM_DOMAIN_DOORBELL) {
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].mem_type = AMDGPU_PL_DOORBELL;
+ places[c].flags = 0;
+ c++;
+ }
+
+ if (domain & AMDGPU_GEM_DOMAIN_MMIO_REMAP) {
+ places[c].fpfn = 0;
+ places[c].lpfn = 0;
+ places[c].mem_type = AMDGPU_PL_MMIO_REMAP;
+ places[c].flags = 0;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_TT;
- if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
- places[c].flags |= TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED;
- else
- places[c].flags |= TTM_PL_FLAG_CACHED;
+ places[c].mem_type =
+ abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
+ AMDGPU_PL_PREEMPT : TTM_PL_TT;
+ places[c].flags = 0;
+ /*
+ * When GTT is just an alternative to VRAM make sure that we
+ * only use it as fallback and still try to fill up VRAM first.
+ */
+ if (abo->tbo.resource && !(adev->flags & AMD_IS_APU) &&
+ domain & abo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)
+ places[c].flags |= TTM_PL_FLAG_FALLBACK;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_CPU) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_SYSTEM;
- if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
- places[c].flags |= TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED;
- else
- places[c].flags |= TTM_PL_FLAG_CACHED;
+ places[c].mem_type = TTM_PL_SYSTEM;
+ places[c].flags = 0;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GDS) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
+ places[c].mem_type = AMDGPU_PL_GDS;
+ places[c].flags = 0;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_GWS) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
+ places[c].mem_type = AMDGPU_PL_GWS;
+ places[c].flags = 0;
c++;
}
if (domain & AMDGPU_GEM_DOMAIN_OA) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
+ places[c].mem_type = AMDGPU_PL_OA;
+ places[c].flags = 0;
c++;
}
if (!c) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ places[c].mem_type = TTM_PL_SYSTEM;
+ places[c].flags = 0;
c++;
}
+ BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
+
placement->num_placement = c;
placement->placement = places;
-
- placement->num_busy_placement = c;
- placement->busy_placement = places;
}
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
+/**
+ * amdgpu_bo_create_reserved - create reserved BO for kernel use
+ *
+ * @adev: amdgpu device object
+ * @size: size for the new BO
+ * @align: alignment for the new BO
+ * @domain: where to place it
+ * @bo_ptr: used to initialize BOs in structures
+ * @gpu_addr: GPU addr of the pinned BO
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Allocates and pins a BO for kernel internal use, and returns it still
+ * reserved.
+ *
+ * Note: a new BO is only created if *bo_ptr is NULL.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ struct amdgpu_bo_param bp;
+ bool free = false;
+ int r;
- amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
- domain, abo->flags);
-}
+ if (!size) {
+ amdgpu_bo_unref(bo_ptr);
+ return 0;
+ }
-static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
- struct ttm_placement *placement)
-{
- BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));
-
- memcpy(bo->placements, placement->placement,
- placement->num_placement * sizeof(struct ttm_place));
- bo->placement.num_placement = placement->num_placement;
- bo->placement.num_busy_placement = placement->num_busy_placement;
- bo->placement.placement = bo->placements;
- bo->placement.busy_placement = bo->placements;
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+ bp.byte_align = align;
+ bp.domain = domain;
+ bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
+ : AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ if (!*bo_ptr) {
+ r = amdgpu_bo_create(adev, &bp, bo_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
+ r);
+ return r;
+ }
+ free = true;
+ }
+
+ r = amdgpu_bo_reserve(*bo_ptr, false);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
+ goto error_free;
+ }
+
+ r = amdgpu_bo_pin(*bo_ptr, domain);
+ if (r) {
+ dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
+ goto error_unreserve;
+ }
+
+ r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
+ if (r) {
+ dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
+ goto error_unpin;
+ }
+
+ if (gpu_addr)
+ *gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
+
+ if (cpu_addr) {
+ r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
+ goto error_unpin;
+ }
+ }
+
+ return 0;
+
+error_unpin:
+ amdgpu_bo_unpin(*bo_ptr);
+error_unreserve:
+ amdgpu_bo_unreserve(*bo_ptr);
+
+error_free:
+ if (free)
+ amdgpu_bo_unref(bo_ptr);
+
+ return r;
}
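
A brief usage sketch of the new helper (illustrative only, not part of the patch): on success the BO comes back pinned, mapped when cpu_addr is given, and still reserved, so the caller unreserves it once setup is done.

static int example_alloc_reserved(struct amdgpu_device *adev)
{
        struct amdgpu_bo *bo = NULL;
        void *cpu_ptr;
        u64 gpu_addr;
        int r;

        r = amdgpu_bo_create_reserved(adev, PAGE_SIZE, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM, &bo,
                                      &gpu_addr, &cpu_ptr);
        if (r)
                return r;

        memset(cpu_ptr, 0, PAGE_SIZE);  /* BO is pinned, mapped and reserved */
        amdgpu_bo_unreserve(bo);

        /* later: amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr); */
        return 0;
}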
/**
@@ -225,13 +328,19 @@ static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
* @size: size for the new BO
* @align: alignment for the new BO
* @domain: where to place it
- * @bo_ptr: resulting BO
+ * @bo_ptr: used to initialize BOs in structures
* @gpu_addr: GPU addr of the pinned BO
* @cpu_addr: optional CPU address mapping
*
* Allocates and pins a BO for kernel internal use.
*
- * Returns 0 on success, negative error code otherwise.
+ * This function is exported to allow the V4L2 isp device
+ * external to the drm device to create and access the kernel BO.
+ *
+ * Note: a new BO is only created if *bo_ptr is NULL.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
*/
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
@@ -240,45 +349,150 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
{
int r;
- r = amdgpu_bo_create(adev, size, align, true, domain,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, bo_ptr);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
+ r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
+ gpu_addr, cpu_addr);
+
+ if (r)
return r;
+
+ if (*bo_ptr)
+ amdgpu_bo_unreserve(*bo_ptr);
+
+ return 0;
+}
+
+/**
+ * amdgpu_bo_create_isp_user - create user BO for isp
+ *
+ * @adev: amdgpu device object
+ * @dma_buf: DMABUF handle for isp buffer
+ * @domain: where to place it
+ * @bo: used to initialize BOs in structures
+ * @gpu_addr: GPU addr of the pinned BO
+ *
+ * Imports the isp DMABUF to allocate and pin a user BO for isp internal use. It
+ * allocates a GART mapping to generate a gpu_addr so the BO is accessible
+ * through the GART aperture for the ISP HW.
+ *
+ * This function is exported to allow the V4L2 isp device external to the drm device
+ * to create and access the isp user BO.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
+ struct dma_buf *dma_buf, u32 domain, struct amdgpu_bo **bo,
+ u64 *gpu_addr)
+
+{
+ struct drm_gem_object *gem_obj;
+ int r;
+
+ gem_obj = amdgpu_gem_prime_import(&adev->ddev, dma_buf);
+ *bo = gem_to_amdgpu_bo(gem_obj);
+ if (!(*bo)) {
+ dev_err(adev->dev, "failed to get valid isp user bo\n");
+ return -EINVAL;
}
- r = amdgpu_bo_reserve(*bo_ptr, false);
+ r = amdgpu_bo_reserve(*bo, false);
if (r) {
- dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
- goto error_free;
+ dev_err(adev->dev, "(%d) failed to reserve isp user bo\n", r);
+ return r;
}
- r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
+ r = amdgpu_bo_pin(*bo, domain);
if (r) {
- dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
+ dev_err(adev->dev, "(%d) isp user bo pin failed\n", r);
goto error_unreserve;
}
- if (cpu_addr) {
- r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
- if (r) {
- dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
- goto error_unreserve;
- }
+ r = amdgpu_ttm_alloc_gart(&(*bo)->tbo);
+ if (r) {
+ dev_err(adev->dev, "%p bind failed\n", *bo);
+ goto error_unpin;
}
- amdgpu_bo_unreserve(*bo_ptr);
+ if (!WARN_ON(!gpu_addr))
+ *gpu_addr = amdgpu_bo_gpu_offset(*bo);
+
+ amdgpu_bo_unreserve(*bo);
return 0;
+error_unpin:
+ amdgpu_bo_unpin(*bo);
error_unreserve:
+ amdgpu_bo_unreserve(*bo);
+ amdgpu_bo_unref(bo);
+
+ return r;
+}
+
+/**
+ * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
+ *
+ * @adev: amdgpu device object
+ * @offset: offset of the BO
+ * @size: size of the BO
+ * @bo_ptr: used to initialize BOs in structures
+ * @cpu_addr: optional CPU address mapping
+ *
+ * Creates a kernel BO at a specific offset in VRAM.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
+ */
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size,
+ struct amdgpu_bo **bo_ptr, void **cpu_addr)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ unsigned int i;
+ int r;
+
+ offset &= PAGE_MASK;
+ size = ALIGN(size, PAGE_SIZE);
+
+ r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL,
+ cpu_addr);
+ if (r)
+ return r;
+
+ if ((*bo_ptr) == NULL)
+ return 0;
+
+ /*
+ * Remove the original mem node and create a new one at the request
+ * position.
+ */
+ if (cpu_addr)
+ amdgpu_bo_kunmap(*bo_ptr);
+
+ ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);
+
+ for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
+ (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
+ (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+ }
+ r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
+ &(*bo_ptr)->tbo.resource, &ctx);
+ if (r)
+ goto error;
+
+ if (cpu_addr) {
+ r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
+ if (r)
+ goto error;
+ }
+
amdgpu_bo_unreserve(*bo_ptr);
+ return 0;
-error_free:
+error:
+ amdgpu_bo_unreserve(*bo_ptr);
amdgpu_bo_unref(bo_ptr);
-
return r;
}
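
A sketch of pinning a kernel BO over a fixed VRAM range (illustrative only; the offset and size below are placeholders, not values from the patch):

static int example_reserve_fixed_vram(struct amdgpu_device *adev,
                                      struct amdgpu_bo **bo)
{
        void *cpu_ptr;

        /* e.g. preserve a firmware-reserved region at 1 MiB, 64 KiB long */
        return amdgpu_bo_create_kernel_at(adev, 0x100000, 0x10000,
                                          bo, &cpu_ptr);
}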
@@ -286,8 +500,13 @@ error_free:
* amdgpu_bo_free_kernel - free BO for kernel use
*
* @bo: amdgpu BO to free
+ * @gpu_addr: pointer to where the BO's GPU memory space address was stored
+ * @cpu_addr: pointer to where the BO's CPU memory space address was stored
*
* unmaps and unpins a BO for kernel internal use.
+ *
+ * This function is exported to allow the V4L2 isp device
+ * external to the drm device to free the kernel BO.
*/
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr)
@@ -295,6 +514,8 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
if (*bo == NULL)
return;
+ WARN_ON(amdgpu_ttm_adev((*bo)->tbo.bdev)->in_suspend);
+
if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
if (cpu_addr)
amdgpu_bo_kunmap(*bo);
@@ -311,63 +532,67 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
*cpu_addr = NULL;
}
-int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
- struct ttm_placement *placement,
- struct reservation_object *resv,
- struct amdgpu_bo **bo_ptr)
+/**
+ * amdgpu_bo_free_isp_user - free BO for isp use
+ *
+ * @bo: amdgpu isp user BO to free
+ *
+ * Unpins and unrefs the BO for isp internal use.
+ *
+ * This function is exported to allow the V4L2 isp device
+ * external to the drm device to free the isp user BO.
+ */
+void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo)
{
- struct amdgpu_bo *bo;
- enum ttm_bo_type type;
- unsigned long page_align;
- u64 initial_bytes_moved;
- size_t acc_size;
- int r;
-
- page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
- size = ALIGN(size, PAGE_SIZE);
+ if (bo == NULL)
+ return;
- if (kernel) {
- type = ttm_bo_type_kernel;
- } else if (sg) {
- type = ttm_bo_type_sg;
- } else {
- type = ttm_bo_type_device;
+ if (amdgpu_bo_reserve(bo, true) == 0) {
+ amdgpu_bo_unpin(bo);
+ amdgpu_bo_unreserve(bo);
}
- *bo_ptr = NULL;
+ amdgpu_bo_unref(&bo);
+}
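
How the isp import/free pair above might be used (a sketch under the assumption that the caller already holds a dma-buf handle; not part of the patch):

static int example_isp_import(struct amdgpu_device *adev,
                              struct dma_buf *isp_dmabuf)
{
        struct amdgpu_bo *bo;
        u64 gpu_addr;
        int r;

        r = amdgpu_bo_create_isp_user(adev, isp_dmabuf,
                                      AMDGPU_GEM_DOMAIN_GTT, &bo, &gpu_addr);
        if (r)
                return r;

        /* ... program the ISP hardware with gpu_addr ... */

        amdgpu_bo_free_isp_user(bo);
        return 0;
}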
- acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
- sizeof(struct amdgpu_bo));
+/* Validate that the BO size fits within the total memory of the requested domain */
+static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
+ unsigned long size, u32 domain)
+{
+ struct ttm_resource_manager *man = NULL;
- bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
- if (bo == NULL)
- return -ENOMEM;
- r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
- if (unlikely(r)) {
- kfree(bo);
- return r;
+ /*
+ * If GTT is part of requested domains the check must succeed to
+ * allow fall back to GTT.
+ */
+ if (domain & AMDGPU_GEM_DOMAIN_GTT)
+ man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
+ else if (domain & AMDGPU_GEM_DOMAIN_VRAM)
+ man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+ else
+ return true;
+
+ if (!man) {
+ if (domain & AMDGPU_GEM_DOMAIN_GTT)
+ WARN_ON_ONCE("GTT domain requested but GTT mem manager uninitialized");
+ return false;
}
- INIT_LIST_HEAD(&bo->shadow_list);
- INIT_LIST_HEAD(&bo->va);
- bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT |
- AMDGPU_GEM_DOMAIN_CPU |
- AMDGPU_GEM_DOMAIN_GDS |
- AMDGPU_GEM_DOMAIN_GWS |
- AMDGPU_GEM_DOMAIN_OA);
- bo->allowed_domains = bo->prefered_domains;
- if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
- bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
- bo->flags = flags;
+ /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU, _DOMAIN_DOORBELL */
+ if (size < man->size)
+ return true;
+
+ DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size, man->size);
+ return false;
+}
+
+bool amdgpu_bo_support_uswc(u64 bo_flags)
+{
#ifdef CONFIG_X86_32
/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
* See https://bugs.freedesktop.org/show_bug.cgi?id=84627
*/
- bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ return false;
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
/* Don't try to enable write-combining when it can't work, or things
* may be slow
@@ -379,422 +604,439 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
thanks to write-combining
#endif
- if (bo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+ if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
"better performance thanks to write-combining\n");
- bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ return false;
#else
/* For architectures that don't support WC memory,
* mask out the WC flag from the BO
*/
if (!drm_arch_can_wc_memory())
- bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ return false;
+
+ return true;
#endif
+}
- amdgpu_fill_placement_to_bo(bo, placement);
- /* Kernel allocation are uninterruptible */
+/**
+ * amdgpu_bo_create - create an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @bo_ptr: pointer to the buffer object pointer
+ *
+ * Creates an &amdgpu_bo buffer object.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_bo_create(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo **bo_ptr)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = (bp->type != ttm_bo_type_kernel),
+ .no_wait_gpu = bp->no_wait_gpu,
+ /* We opt to avoid OOM on system pages allocations */
+ .gfp_retry_mayfail = true,
+ .allow_res_evict = bp->type != ttm_bo_type_kernel,
+ .resv = bp->resv
+ };
+ struct amdgpu_bo *bo;
+ unsigned long page_align, size = bp->size;
+ int r;
+
+ /* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
+ if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
+ /* GWS and OA don't need any alignment. */
+ page_align = bp->byte_align;
+ size <<= PAGE_SHIFT;
+
+ } else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
+ /* Both size and alignment must be a multiple of 4. */
+ page_align = ALIGN(bp->byte_align, 4);
+ size = ALIGN(size, 4) << PAGE_SHIFT;
+ } else {
+ /* Memory should be aligned at least to a page size. */
+ page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+ size = ALIGN(size, PAGE_SIZE);
+ }
+
+ if (!amdgpu_bo_validate_size(adev, size, bp->domain))
+ return -ENOMEM;
- initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
- r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, page_align, !kernel, NULL,
- acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
- amdgpu_cs_report_moved_bytes(adev,
- atomic64_read(&adev->num_bytes_moved) - initial_bytes_moved);
+ BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
+
+ *bo_ptr = NULL;
+ bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
+ if (bo == NULL)
+ return -ENOMEM;
+ drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
+ bo->tbo.base.funcs = &amdgpu_gem_object_funcs;
+ bo->vm_bo = NULL;
+ bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
+ bp->domain;
+ bo->allowed_domains = bo->preferred_domains;
+ if (bp->type != ttm_bo_type_kernel &&
+ !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
+ bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
+ bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
+
+ bo->flags = bp->flags;
+
+ if (adev->gmc.mem_partitions)
+ /* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */
+ bo->xcp_id = bp->xcp_id_plus1 - 1;
+ else
+ /* For GPUs without spatial partitioning */
+ bo->xcp_id = 0;
+ if (!amdgpu_bo_support_uswc(bo->flags))
+ bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+
+ bo->tbo.bdev = &adev->mman.bdev;
+ if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
+ AMDGPU_GEM_DOMAIN_GDS))
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ else
+ amdgpu_bo_placement_from_domain(bo, bp->domain);
+ if (bp->type == ttm_bo_type_kernel)
+ bo->tbo.priority = 2;
+ else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
+ bo->tbo.priority = 1;
+
+ if (!bp->destroy)
+ bp->destroy = &amdgpu_bo_destroy;
+
+ r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
+ &bo->placement, page_align, &ctx, NULL,
+ bp->resv, bp->destroy);
if (unlikely(r != 0))
return r;
- if (kernel)
- bo->tbo.priority = 1;
+ if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ amdgpu_res_cpu_visible(adev, bo->tbo.resource))
+ amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
+ ctx.bytes_moved);
+ else
+ amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
- if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
- bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
+ if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
+ bo->tbo.resource->mem_type == TTM_PL_VRAM) {
struct dma_fence *fence;
- r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+ r = amdgpu_ttm_clear_buffer(bo, bo->tbo.base.resv, &fence);
if (unlikely(r))
goto fail_unreserve;
- amdgpu_bo_fence(bo, fence, false);
- dma_fence_put(bo->tbo.moving);
- bo->tbo.moving = dma_fence_get(fence);
+ dma_resv_add_fence(bo->tbo.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
dma_fence_put(fence);
}
- if (!resv)
+ if (!bp->resv)
amdgpu_bo_unreserve(bo);
*bo_ptr = bo;
trace_amdgpu_bo_create(bo);
+ /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
+ if (bp->type == ttm_bo_type_device)
+ bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
return 0;
fail_unreserve:
- if (!resv)
- ww_mutex_unlock(&bo->tbo.resv->lock);
+ if (!bp->resv)
+ dma_resv_unlock(bo->tbo.base.resv);
amdgpu_bo_unref(&bo);
return r;
}
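
The reworked creation path takes its arguments through struct amdgpu_bo_param; a minimal sketch of filling it for a cleared 1 MiB VRAM allocation (illustrative only, not part of the patch):

static int example_create_vram_bo(struct amdgpu_device *adev,
                                  struct amdgpu_bo **bo)
{
        struct amdgpu_bo_param bp;

        memset(&bp, 0, sizeof(bp));
        bp.size = 1 << 20;
        bp.byte_align = PAGE_SIZE;
        bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
        bp.flags = AMDGPU_GEM_CREATE_VRAM_CLEARED;
        bp.type = ttm_bo_type_kernel;
        bp.resv = NULL;
        bp.bo_ptr_size = sizeof(struct amdgpu_bo);

        return amdgpu_bo_create(adev, &bp, bo);
}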
-static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- struct amdgpu_bo *bo)
-{
- struct ttm_placement placement = {0};
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
- int r;
-
- if (bo->shadow)
- return 0;
-
- bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
- memset(&placements, 0,
- (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
-
- amdgpu_ttm_placement_init(adev, &placement,
- placements, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC);
-
- r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
- AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC,
- NULL, &placement,
- bo->tbo.resv,
- &bo->shadow);
- if (!r) {
- bo->shadow->parent = amdgpu_bo_ref(bo);
- mutex_lock(&adev->shadow_list_lock);
- list_add_tail(&bo->shadow_list, &adev->shadow_list);
- mutex_unlock(&adev->shadow_list_lock);
- }
-
- return r;
-}
+/**
+ * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @ubo_ptr: pointer to the buffer object pointer
+ *
+ * Create a BO to be used by a user application.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
-int amdgpu_bo_create(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
- struct reservation_object *resv,
- struct amdgpu_bo **bo_ptr)
+int amdgpu_bo_create_user(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo_user **ubo_ptr)
{
- struct ttm_placement placement = {0};
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ struct amdgpu_bo *bo_ptr;
int r;
- memset(&placements, 0,
- (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));
-
- amdgpu_ttm_placement_init(adev, &placement,
- placements, domain, flags);
-
- r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
- domain, flags, sg, &placement,
- resv, bo_ptr);
+ bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
+ bp->destroy = &amdgpu_bo_user_destroy;
+ r = amdgpu_bo_create(adev, bp, &bo_ptr);
if (r)
return r;
- if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
- if (!resv) {
- r = ww_mutex_lock(&(*bo_ptr)->tbo.resv->lock, NULL);
- WARN_ON(r != 0);
- }
-
- r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
-
- if (!resv)
- ww_mutex_unlock(&(*bo_ptr)->tbo.resv->lock);
-
- if (r)
- amdgpu_bo_unref(bo_ptr);
- }
-
+ *ubo_ptr = to_amdgpu_bo_user(bo_ptr);
return r;
}
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence,
- bool direct)
-
-{
- struct amdgpu_bo *shadow = bo->shadow;
- uint64_t bo_addr, shadow_addr;
- int r;
-
- if (!shadow)
- return -EINVAL;
-
- bo_addr = amdgpu_bo_gpu_offset(bo);
- shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
-
- r = reservation_object_reserve_shared(bo->tbo.resv);
- if (r)
- goto err;
-
- r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
- amdgpu_bo_size(bo), resv, fence,
- direct);
- if (!r)
- amdgpu_bo_fence(bo, *fence, true);
-
-err:
- return r;
-}
-
-int amdgpu_bo_validate(struct amdgpu_bo *bo)
-{
- uint32_t domain;
- int r;
-
- if (bo->pin_count)
- return 0;
-
- domain = bo->prefered_domains;
-
-retry:
- amdgpu_ttm_placement_from_domain(bo, domain);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
- domain = bo->allowed_domains;
- goto retry;
- }
-
- return r;
-}
-
-int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence,
- bool direct)
+/**
+ * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @vmbo_ptr: pointer to the buffer object pointer
+ *
+ * Create a BO to be used by GPUVM.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_bo_create_vm(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo_vm **vmbo_ptr)
{
- struct amdgpu_bo *shadow = bo->shadow;
- uint64_t bo_addr, shadow_addr;
+ struct amdgpu_bo *bo_ptr;
int r;
- if (!shadow)
- return -EINVAL;
-
- bo_addr = amdgpu_bo_gpu_offset(bo);
- shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);
-
- r = reservation_object_reserve_shared(bo->tbo.resv);
+ /* bo_ptr_size will be determined by the caller and it depends on
+ * the number of amdgpu_vm_pt entries.
+ */
+ BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
+ r = amdgpu_bo_create(adev, bp, &bo_ptr);
if (r)
- goto err;
-
- r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
- amdgpu_bo_size(bo), resv, fence,
- direct);
- if (!r)
- amdgpu_bo_fence(bo, *fence, true);
+ return r;
-err:
+ *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
return r;
}
+/**
+ * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be mapped
+ * @ptr: kernel virtual address to be returned
+ *
+ * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
+ * amdgpu_bo_kptr() to get the kernel virtual address.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
- bool is_iomem;
+ void *kptr;
long r;
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
return -EPERM;
- if (bo->kptr) {
- if (ptr) {
- *ptr = bo->kptr;
- }
- return 0;
- }
-
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
- MAX_SCHEDULE_TIMEOUT);
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
return r;
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
+ kptr = amdgpu_bo_kptr(bo);
+ if (kptr) {
+ if (ptr)
+ *ptr = kptr;
+ return 0;
+ }
+
+ r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap);
if (r)
return r;
- bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
if (ptr)
- *ptr = bo->kptr;
+ *ptr = amdgpu_bo_kptr(bo);
return 0;
}
+/**
+ * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
+ *
+ * Returns:
+ * the virtual address of a buffer object area.
+ */
+void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
+{
+ bool is_iomem;
+
+ return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
+}
+
+/**
+ * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be unmapped
+ *
+ * Unmaps a kernel map set up by amdgpu_bo_kmap().
+ */
void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
- if (bo->kptr == NULL)
- return;
- bo->kptr = NULL;
- ttm_bo_kunmap(&bo->kmap);
+ if (bo->kmap.bo)
+ ttm_bo_kunmap(&bo->kmap);
}
+/**
+ * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * References the contained &ttm_buffer_object.
+ *
+ * Returns:
+ * a refcounted pointer to the &amdgpu_bo buffer object.
+ */
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
if (bo == NULL)
return NULL;
- ttm_bo_reference(&bo->tbo);
+ drm_gem_object_get(&bo->tbo.base);
return bo;
}
+/**
+ * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object
+ *
+ * Unreferences the contained &ttm_buffer_object and clears the pointer.
+ */
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
- struct ttm_buffer_object *tbo;
-
if ((*bo) == NULL)
return;
- tbo = &((*bo)->tbo);
- ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
+ drm_gem_object_put(&(*bo)->tbo.base);
+ *bo = NULL;
}
-int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
- u64 min_offset, u64 max_offset,
- u64 *gpu_addr)
+/**
+ * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be pinned
+ * @domain: domain to be pinned to
+ *
+ * Pins the buffer object according to requested domain. If the memory is
+ * unbound gart memory, binds the pages into gart table. Adjusts pin_count and
+ * pin_size accordingly.
+ *
+ * Pinning means to lock pages in memory along with keeping them at a fixed
+ * offset. It is required when a buffer can not be moved, for example, when
+ * a display buffer is being scanned out.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
int r, i;
- unsigned fpfn, lpfn;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
return -EPERM;
- if (WARN_ON_ONCE(min_offset > max_offset))
- return -EINVAL;
+ /* Check domain to be pinned to against preferred domains */
+ if (bo->preferred_domains & domain)
+ domain = bo->preferred_domains & domain;
/* A shared bo cannot be migrated to VRAM */
- if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
- return -EINVAL;
+ if (drm_gem_is_imported(&bo->tbo.base)) {
+ if (domain & AMDGPU_GEM_DOMAIN_GTT)
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ else
+ return -EINVAL;
+ }
- if (bo->pin_count) {
- uint32_t mem_type = bo->tbo.mem.mem_type;
+ if (bo->tbo.pin_count) {
+ uint32_t mem_type = bo->tbo.resource->mem_type;
+ uint32_t mem_flags = bo->tbo.resource->placement;
- if (domain != amdgpu_mem_type_to_domain(mem_type))
+ if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
return -EINVAL;
- bo->pin_count++;
- if (gpu_addr)
- *gpu_addr = amdgpu_bo_gpu_offset(bo);
-
- if (max_offset != 0) {
- u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
- WARN_ON_ONCE(max_offset <
- (amdgpu_bo_gpu_offset(bo) - domain_start));
- }
+ if ((mem_type == TTM_PL_VRAM) &&
+ (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
+ !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
+ return -EINVAL;
+ ttm_bo_pin(&bo->tbo);
return 0;
}
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_ttm_placement_from_domain(bo, domain);
+ /* This assumes only APU display buffers are pinned with (VRAM|GTT).
+ * See function amdgpu_display_supported_domains()
+ */
+ domain = amdgpu_bo_get_preferred_domain(adev, domain);
+
+ if (drm_gem_is_imported(&bo->tbo.base))
+ dma_buf_pin(bo->tbo.base.import_attach);
+
+ /* force to pin into visible video ram */
+ if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ amdgpu_bo_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
- /* force to pin into visible video ram */
- if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
- !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
- (!max_offset || max_offset >
- adev->mc.visible_vram_size)) {
- if (WARN_ON_ONCE(min_offset >
- adev->mc.visible_vram_size))
- return -EINVAL;
- fpfn = min_offset >> PAGE_SHIFT;
- lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
- } else {
- fpfn = min_offset >> PAGE_SHIFT;
- lpfn = max_offset >> PAGE_SHIFT;
- }
- if (fpfn > bo->placements[i].fpfn)
- bo->placements[i].fpfn = fpfn;
- if (!bo->placements[i].lpfn ||
- (lpfn && lpfn < bo->placements[i].lpfn))
- bo->placements[i].lpfn = lpfn;
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS &&
+ bo->placements[i].mem_type == TTM_PL_VRAM)
+ bo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
}
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r)) {
dev_err(adev->dev, "%p pin failed\n", bo);
goto error;
}
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
- if (unlikely(r)) {
- dev_err(adev->dev, "%p bind failed\n", bo);
- goto error;
- }
- bo->pin_count = 1;
- if (gpu_addr != NULL)
- *gpu_addr = amdgpu_bo_gpu_offset(bo);
- if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
- adev->vram_pin_size += amdgpu_bo_size(bo);
- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- adev->invisible_pin_size += amdgpu_bo_size(bo);
- } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
- adev->gart_pin_size += amdgpu_bo_size(bo);
+ ttm_bo_pin(&bo->tbo);
+
+ if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
+ atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
+ atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
+ &adev->visible_pin_size);
+ } else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
+ atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
}
error:
return r;
}
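
A reserve/pin/map sketch for a buffer that must stay resident (illustrative only, not part of the patch); note the pin call no longer takes a gpu_addr out-parameter.

static int example_pin_and_map(struct amdgpu_bo *bo, void **cpu_ptr)
{
        int r;

        r = amdgpu_bo_reserve(bo, false);
        if (r)
                return r;

        r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
        if (r)
                goto unreserve;

        r = amdgpu_bo_kmap(bo, cpu_ptr);
        if (r)
                amdgpu_bo_unpin(bo);

unreserve:
        amdgpu_bo_unreserve(bo);
        return r;
}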
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
-{
- return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
-}
-
-int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+/**
+ * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
+ * @bo: &amdgpu_bo buffer object to be unpinned
+ *
+ * Decreases the pin_count, and clears the flags if pin_count reaches 0.
+ * Changes placement and pin size accordingly.
+ */
+void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- int r, i;
- if (!bo->pin_count) {
- dev_warn(adev->dev, "%p unpin not necessary\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
- for (i = 0; i < bo->placement.num_placement; i++) {
- bo->placements[i].lpfn = 0;
- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- }
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (unlikely(r)) {
- dev_err(adev->dev, "%p validate failed for unpin\n", bo);
- goto error;
- }
+ ttm_bo_unpin(&bo->tbo);
+ if (bo->tbo.pin_count)
+ return;
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
- adev->vram_pin_size -= amdgpu_bo_size(bo);
- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- adev->invisible_pin_size -= amdgpu_bo_size(bo);
- } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
- adev->gart_pin_size -= amdgpu_bo_size(bo);
- }
+ if (drm_gem_is_imported(&bo->tbo.base))
+ dma_buf_unpin(bo->tbo.base.import_attach);
-error:
- return r;
-}
-
-int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
-{
- /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
- if (0 && (adev->flags & AMD_IS_APU)) {
- /* Useless to evict on IGP chips */
- return 0;
+ if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
+ atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
+ atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
+ &adev->visible_pin_size);
+ } else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
+ atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
}
- return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
+
}
-static const char *amdgpu_vram_names[] = {
+static const char * const amdgpu_vram_names[] = {
"UNKNOWN",
"GDDR1",
"DDR2",
@@ -802,69 +1044,143 @@ static const char *amdgpu_vram_names[] = {
"GDDR4",
"GDDR5",
"HBM",
- "DDR3"
+ "DDR3",
+ "DDR4",
+ "GDDR6",
+ "DDR5",
+ "LPDDR4",
+ "LPDDR5",
+ "HBM3E"
};
+/**
+ * amdgpu_bo_init - initialize memory manager
+ * @adev: amdgpu device object
+ *
+ * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_init(struct amdgpu_device *adev)
{
- /* reserve PAT memory space to WC for VRAM */
- arch_io_reserve_memtype_wc(adev->mc.aper_base,
- adev->mc.aper_size);
+ /* On A+A platform, VRAM can be mapped as WB */
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
+ /* reserve PAT memory space to WC for VRAM */
+ int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
+ adev->gmc.aper_size);
+
+ if (r) {
+ DRM_ERROR("Unable to set WC memtype for the aperture base\n");
+ return r;
+ }
+
+ /* Add an MTRR for the VRAM */
+ adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
+ adev->gmc.aper_size);
+ }
- /* Add an MTRR for the VRAM */
- adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
- adev->mc.aper_size);
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
- adev->mc.mc_vram_size >> 20,
- (unsigned long long)adev->mc.aper_size >> 20);
+ adev->gmc.mc_vram_size >> 20,
+ (unsigned long long)adev->gmc.aper_size >> 20);
DRM_INFO("RAM width %dbits %s\n",
- adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
+ adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
return amdgpu_ttm_init(adev);
}
+/**
+ * amdgpu_bo_fini - tear down memory manager
+ * @adev: amdgpu device object
+ *
+ * Reverses amdgpu_bo_init() to tear down memory manager.
+ */
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
+ int idx;
+
amdgpu_ttm_fini(adev);
- arch_phys_wc_del(adev->mc.vram_mtrr);
- arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
-}
-int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
- struct vm_area_struct *vma)
-{
- return ttm_fbdev_mmap(vma, &bo->tbo);
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
+ arch_phys_wc_del(adev->gmc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+ }
+ drm_dev_exit(idx);
+ }
}
+/**
+ * amdgpu_bo_set_tiling_flags - set tiling flags
+ * @bo: &amdgpu_bo buffer object
+ * @tiling_flags: new flags
+ *
+ * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
+ * kernel driver to set the tiling flags on a buffer.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct amdgpu_bo_user *ubo;
+ BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
if (adev->family <= AMDGPU_FAMILY_CZ &&
AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
return -EINVAL;
- bo->tiling_flags = tiling_flags;
+ ubo = to_amdgpu_bo_user(bo);
+ ubo->tiling_flags = tiling_flags;
return 0;
}
+/**
+ * amdgpu_bo_get_tiling_flags - get tiling flags
+ * @bo: &amdgpu_bo buffer object
+ * @tiling_flags: returned flags
+ *
+ * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
+ * query the tiling flags on a buffer.
+ */
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
- lockdep_assert_held(&bo->tbo.resv->lock.base);
+ struct amdgpu_bo_user *ubo;
+
+ BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
+ dma_resv_assert_held(bo->tbo.base.resv);
+ ubo = to_amdgpu_bo_user(bo);
if (tiling_flags)
- *tiling_flags = bo->tiling_flags;
+ *tiling_flags = ubo->tiling_flags;
}
-int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
- uint32_t metadata_size, uint64_t flags)
+/**
+ * amdgpu_bo_set_metadata - set metadata
+ * @bo: &amdgpu_bo buffer object
+ * @metadata: new metadata
+ * @metadata_size: size of the new metadata
+ * @flags: flags of the new metadata
+ *
+ * Sets buffer object's metadata, its size and flags.
+ * Used via GEM ioctl.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
+ u32 metadata_size, uint64_t flags)
{
+ struct amdgpu_bo_user *ubo;
void *buffer;
+ BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
+ ubo = to_amdgpu_bo_user(bo);
if (!metadata_size) {
- if (bo->metadata_size) {
- kfree(bo->metadata);
- bo->metadata = NULL;
- bo->metadata_size = 0;
+ if (ubo->metadata_size) {
+ kfree(ubo->metadata);
+ ubo->metadata = NULL;
+ ubo->metadata_size = 0;
}
return 0;
}
@@ -876,112 +1192,196 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
if (buffer == NULL)
return -ENOMEM;
- kfree(bo->metadata);
- bo->metadata_flags = flags;
- bo->metadata = buffer;
- bo->metadata_size = metadata_size;
+ kfree(ubo->metadata);
+ ubo->metadata_flags = flags;
+ ubo->metadata = buffer;
+ ubo->metadata_size = metadata_size;
return 0;
}
+/**
+ * amdgpu_bo_get_metadata - get metadata
+ * @bo: &amdgpu_bo buffer object
+ * @buffer: returned metadata
+ * @buffer_size: size of the buffer
+ * @metadata_size: size of the returned metadata
+ * @flags: flags of the returned metadata
+ *
+ * Gets buffer object's metadata, its size and flags. buffer_size shall not be
+ * less than metadata_size.
+ * Used via GEM ioctl.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
size_t buffer_size, uint32_t *metadata_size,
uint64_t *flags)
{
+ struct amdgpu_bo_user *ubo;
+
if (!buffer && !metadata_size)
return -EINVAL;
+ BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
+ ubo = to_amdgpu_bo_user(bo);
+ if (metadata_size)
+ *metadata_size = ubo->metadata_size;
+
if (buffer) {
- if (buffer_size < bo->metadata_size)
+ if (buffer_size < ubo->metadata_size)
return -EINVAL;
- if (bo->metadata_size)
- memcpy(buffer, bo->metadata, bo->metadata_size);
+ if (ubo->metadata_size)
+ memcpy(buffer, ubo->metadata, ubo->metadata_size);
}
- if (metadata_size)
- *metadata_size = bo->metadata_size;
if (flags)
- *flags = bo->metadata_flags;
+ *flags = ubo->metadata_flags;
return 0;
}
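
A sketch of the metadata pair on a user (GEM) BO; the payload below is a placeholder, not taken from the patch:

static int example_roundtrip_metadata(struct amdgpu_bo *bo)
{
        u32 payload = 0x1234;
        u32 out_size;
        u64 out_flags;
        int r;

        r = amdgpu_bo_set_metadata(bo, &payload, sizeof(payload), 0);
        if (r)
                return r;

        return amdgpu_bo_get_metadata(bo, &payload, sizeof(payload),
                                      &out_size, &out_flags);
}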
+/**
+ * amdgpu_bo_move_notify - notification about a memory move
+ * @bo: pointer to a buffer object
+ * @evict: if this move is evicting the buffer from the graphics address space
+ * @new_mem: new resource for backing the BO
+ *
+ * Marks the corresponding &amdgpu_bo buffer object as invalid and also
+ * performs bookkeeping.
+ * TTM driver callback which is called when ttm moves a buffer.
+ */
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
- struct ttm_mem_reg *new_mem)
+ struct ttm_resource *new_mem)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct ttm_resource *old_mem = bo->resource;
struct amdgpu_bo *abo;
- struct ttm_mem_reg *old_mem = &bo->mem;
- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
- abo = container_of(bo, struct amdgpu_bo, tbo);
- amdgpu_vm_bo_invalidate(adev, abo);
+ abo = ttm_to_amdgpu_bo(bo);
+ amdgpu_vm_bo_move(abo, new_mem, evict);
- /* remember the eviction */
- if (evict)
- atomic64_inc(&adev->num_evictions);
+ amdgpu_bo_kunmap(abo);
- /* update statistics */
- if (!new_mem)
- return;
+ if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) &&
+ old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
+ dma_buf_move_notify(abo->tbo.base.dma_buf);
/* move_notify is called before move happens */
- amdgpu_update_memory_usage(adev, &bo->mem, new_mem);
-
- trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
+ trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,
+ old_mem ? old_mem->mem_type : -1);
}
-int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+/**
+ * amdgpu_bo_release_notify - notification about a BO being released
+ * @bo: pointer to a buffer object
+ *
+ * Wipes VRAM buffers whose contents should not be leaked before the
+ * memory is released.
+ */
+void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct dma_fence *fence = NULL;
struct amdgpu_bo *abo;
- unsigned long offset, size, lpfn;
- int i, r;
+ int r;
- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
- return 0;
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
+ return;
- abo = container_of(bo, struct amdgpu_bo, tbo);
- if (bo->mem.mem_type != TTM_PL_VRAM)
- return 0;
+ abo = ttm_to_amdgpu_bo(bo);
+
+ WARN_ON(abo->vm_bo);
+
+ if (abo->kfd_bo)
+ amdgpu_amdkfd_release_notify(abo);
+
+ /*
+ * We lock the private dma_resv object here and since the BO is about to
+ * be released nobody else should have a pointer to it.
+ * So when this locking here fails something is wrong with the reference
+ * counting.
+ */
+ if (WARN_ON_ONCE(!dma_resv_trylock(&bo->base._resv)))
+ return;
+
+ amdgpu_amdkfd_remove_all_eviction_fences(abo);
+
+ if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
+ !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
+ adev->in_suspend || drm_dev_is_unplugged(adev_to_drm(adev)))
+ goto out;
- size = bo->mem.num_pages << PAGE_SHIFT;
- offset = bo->mem.start << PAGE_SHIFT;
- /* TODO: figure out how to map scattered VRAM to the CPU */
- if ((offset + size) <= adev->mc.visible_vram_size)
+ r = dma_resv_reserve_fences(&bo->base._resv, 1);
+ if (r)
+ goto out;
+
+ r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true,
+ AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE);
+ if (WARN_ON(r))
+ goto out;
+
+ amdgpu_vram_mgr_set_cleared(bo->resource);
+ dma_resv_add_fence(&bo->base._resv, fence, DMA_RESV_USAGE_KERNEL);
+ dma_fence_put(fence);
+
+out:
+ dma_resv_unlock(&bo->base._resv);
+}
+
+/**
+ * amdgpu_bo_fault_reserve_notify - notification about a memory fault
+ * @bo: pointer to a buffer object
+ *
+ * Notifies the driver that we are taking a fault on this BO and have reserved
+ * it; also performs bookkeeping.
+ * TTM driver callback for dealing with vm faults.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+ int r;
+
+ /* Remember that this BO was accessed by the CPU */
+ abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
+ if (amdgpu_res_cpu_visible(adev, bo->resource))
return 0;
/* Can't move a pinned BO to visible VRAM */
- if (abo->pin_count > 0)
- return -EINVAL;
+ if (abo->tbo.pin_count > 0)
+ return VM_FAULT_SIGBUS;
/* hurrah the memory is not visible ! */
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
- lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
- for (i = 0; i < abo->placement.num_placement; i++) {
- /* Force into visible VRAM */
- if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
- (!abo->placements[i].lpfn ||
- abo->placements[i].lpfn > lpfn))
- abo->placements[i].lpfn = lpfn;
- }
- r = ttm_bo_validate(bo, &abo->placement, false, false);
- if (unlikely(r == -ENOMEM)) {
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
- return ttm_bo_validate(bo, &abo->placement, false, false);
- } else if (unlikely(r != 0)) {
- return r;
- }
+ atomic64_inc(&adev->num_vram_cpu_page_faults);
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT);
+
+ /* Avoid costly evictions; only set GTT as a busy placement */
+ abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
+
+ r = ttm_bo_validate(bo, &abo->placement, &ctx);
+ if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
+ return VM_FAULT_NOPAGE;
+ else if (unlikely(r))
+ return VM_FAULT_SIGBUS;
- offset = bo->mem.start << PAGE_SHIFT;
/* this should never happen */
- if ((offset + size) > adev->mc.visible_vram_size)
- return -EINVAL;
+ if (bo->resource->mem_type == TTM_PL_VRAM &&
+ !amdgpu_res_cpu_visible(adev, bo->resource))
+ return VM_FAULT_SIGBUS;
+ ttm_bo_move_to_lru_tail_unlocked(bo);
return 0;
}
@@ -996,33 +1396,293 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared)
{
- struct reservation_object *resv = bo->tbo.resv;
+ struct dma_resv *resv = bo->tbo.base.resv;
+ int r;
- if (shared)
- reservation_object_add_shared_fence(resv, fence);
- else
- reservation_object_add_excl_fence(resv, fence);
+ r = dma_resv_reserve_fences(resv, 1);
+ if (r) {
+ /* As last resort on OOM we block for the fence */
+ dma_fence_wait(fence, false);
+ return;
+ }
+
+ dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
+ DMA_RESV_USAGE_WRITE);
+}
+
+/**
+ * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
+ *
+ * @adev: amdgpu device pointer
+ * @resv: reservation object to sync to
+ * @sync_mode: synchronization mode
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Extract the fences from the reservation object and waits for them to finish.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode, void *owner,
+ bool intr)
+{
+ struct amdgpu_sync sync;
+ int r;
+
+ amdgpu_sync_create(&sync);
+ amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
+ r = amdgpu_sync_wait(&sync, intr);
+ amdgpu_sync_free(&sync);
+ return r;
+}
+
+/**
+ * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
+ * @bo: buffer object to wait for
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Wrapper to wait for fences in a BO.
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
+ AMDGPU_SYNC_NE_OWNER, owner, intr);
}
/**
* amdgpu_bo_gpu_offset - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
- * Returns current GPU offset of the object.
- *
* Note: object should either be pinned or reserved when calling this
* function, it might be useful to add check for this for debugging.
+ *
+ * Returns:
+ * current GPU offset of the object.
*/
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
- !amdgpu_ttm_is_bound(bo->tbo.ttm));
- WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
- !bo->pin_count);
- WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
+ WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
+ !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
+ WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
+ WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
!(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
- return bo->tbo.offset;
+ return amdgpu_bo_gpu_offset_no_check(bo);
+}
+
+/**
+ * amdgpu_bo_fb_aper_addr - return FB aperture GPU offset of the VRAM bo
+ * @bo: amdgpu VRAM buffer object for which we query the offset
+ *
+ * Returns:
+ * current FB aperture GPU offset of the object.
+ */
+u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ uint64_t offset, fb_base;
+
+ WARN_ON_ONCE(bo->tbo.resource->mem_type != TTM_PL_VRAM);
+
+ fb_base = adev->gmc.fb_start;
+ fb_base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ offset = (bo->tbo.resource->start << PAGE_SHIFT) + fb_base;
+ return amdgpu_gmc_sign_extend(offset);
+}
+
+/**
+ * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo
+ * @bo: amdgpu object for which we query the offset
+ *
+ * Returns:
+ * current GPU offset of the object without raising warnings.
+ */
+u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ uint64_t offset = AMDGPU_BO_INVALID_OFFSET;
+
+ if (bo->tbo.resource->mem_type == TTM_PL_TT)
+ offset = amdgpu_gmc_agp_addr(&bo->tbo);
+
+ if (offset == AMDGPU_BO_INVALID_OFFSET)
+ offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+ amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
+
+ return amdgpu_gmc_sign_extend(offset);
}
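Illustrative arithmetic with assumed values: for a VRAM BO whose resource starts at page 0x40 and a VRAM window that begins at 0xF400000000, the helper returns (0x40 << 12) + 0xF400000000 = 0xF400040000, which the final sign extension leaves unchanged.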
+
+/**
+ * amdgpu_bo_mem_stats_placement - bo placement for memory accounting
+ * @bo: the buffer object we should look at
+ *
+ * A BO can have multiple preferred placements; to avoid double counting we want
+ * to file it under a single placement for memory stats.
+ * Luckily, if we take the highest set bit in preferred_domains the result is
+ * quite sensible.
+ *
+ * Returns:
+ * Which of the placements should the BO be accounted under.
+ */
+uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo)
+{
+ uint32_t domain = bo->preferred_domains & AMDGPU_GEM_DOMAIN_MASK;
+
+ if (!domain)
+ return TTM_PL_SYSTEM;
+
+ switch (rounddown_pow_of_two(domain)) {
+ case AMDGPU_GEM_DOMAIN_CPU:
+ return TTM_PL_SYSTEM;
+ case AMDGPU_GEM_DOMAIN_GTT:
+ return TTM_PL_TT;
+ case AMDGPU_GEM_DOMAIN_VRAM:
+ return TTM_PL_VRAM;
+ case AMDGPU_GEM_DOMAIN_GDS:
+ return AMDGPU_PL_GDS;
+ case AMDGPU_GEM_DOMAIN_GWS:
+ return AMDGPU_PL_GWS;
+ case AMDGPU_GEM_DOMAIN_OA:
+ return AMDGPU_PL_OA;
+ case AMDGPU_GEM_DOMAIN_DOORBELL:
+ return AMDGPU_PL_DOORBELL;
+ case AMDGPU_GEM_DOMAIN_MMIO_REMAP:
+ return AMDGPU_PL_MMIO_REMAP;
+ default:
+ return TTM_PL_SYSTEM;
+ }
+}
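For example, a BO with preferred_domains = AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT (0x4 | 0x2 = 0x6) has VRAM as its highest set bit; rounddown_pow_of_two(0x6) yields 0x4, so such a BO is accounted under TTM_PL_VRAM only.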
+
+/**
+ * amdgpu_bo_get_preferred_domain - get preferred domain
+ * @adev: amdgpu device object
+ * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
+ *
+ * Returns:
+ * Which of the allowed domains is preferred for allocating the BO.
+ */
+uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
+ uint32_t domain)
+{
+ if ((domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) &&
+ ((adev->asic_type == CHIP_CARRIZO) || (adev->asic_type == CHIP_STONEY))) {
+ domain = AMDGPU_GEM_DOMAIN_VRAM;
+ if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
+ domain = AMDGPU_GEM_DOMAIN_GTT;
+ }
+ return domain;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+#define amdgpu_bo_print_flag(m, bo, flag) \
+ do { \
+ if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) { \
+ seq_printf((m), " " #flag); \
+ } \
+ } while (0)
+
+/**
+ * amdgpu_bo_print_info - print BO info in debugfs file
+ *
+ * @id: Index or Id of the BO
+ * @bo: Requested BO for printing info
+ * @m: debugfs file
+ *
+ * Print BO information in debugfs file
+ *
+ * Returns:
+ * Size of the BO in bytes.
+ */
+u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct dma_buf_attachment *attachment;
+ struct dma_buf *dma_buf;
+ const char *placement;
+ unsigned int pin_count;
+ u64 size;
+
+ if (dma_resv_trylock(bo->tbo.base.resv)) {
+ if (!bo->tbo.resource) {
+ placement = "NONE";
+ } else {
+ switch (bo->tbo.resource->mem_type) {
+ case TTM_PL_VRAM:
+ if (amdgpu_res_cpu_visible(adev, bo->tbo.resource))
+ placement = "VRAM VISIBLE";
+ else
+ placement = "VRAM";
+ break;
+ case TTM_PL_TT:
+ placement = "GTT";
+ break;
+ case AMDGPU_PL_GDS:
+ placement = "GDS";
+ break;
+ case AMDGPU_PL_GWS:
+ placement = "GWS";
+ break;
+ case AMDGPU_PL_OA:
+ placement = "OA";
+ break;
+ case AMDGPU_PL_PREEMPT:
+ placement = "PREEMPTIBLE";
+ break;
+ case AMDGPU_PL_DOORBELL:
+ placement = "DOORBELL";
+ break;
+ case AMDGPU_PL_MMIO_REMAP:
+ placement = "MMIO REMAP";
+ break;
+ case TTM_PL_SYSTEM:
+ default:
+ placement = "CPU";
+ break;
+ }
+ }
+ dma_resv_unlock(bo->tbo.base.resv);
+ } else {
+ placement = "UNKNOWN";
+ }
+
+ size = amdgpu_bo_size(bo);
+ seq_printf(m, "\t\t0x%08x: %12lld byte %s",
+ id, size, placement);
+
+ pin_count = READ_ONCE(bo->tbo.pin_count);
+ if (pin_count)
+ seq_printf(m, " pin count %d", pin_count);
+
+ dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
+ attachment = READ_ONCE(bo->tbo.base.import_attach);
+
+ if (attachment)
+ seq_printf(m, " imported from ino:%lu", file_inode(dma_buf->file)->i_ino);
+ else if (dma_buf)
+ seq_printf(m, " exported as ino:%lu", file_inode(dma_buf->file)->i_ino);
+
+ amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
+ amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
+ amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
+ amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
+ amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
+ amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
+ amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);
+ /* Dump the GEM object's reservation fences */
+ if (dma_resv_trylock(bo->tbo.base.resv)) {
+ dma_resv_describe(bo->tbo.base.resv, m);
+ dma_resv_unlock(bo->tbo.base.resv);
+ }
+ seq_puts(m, "\n");
+
+ return size;
+}
+#endif
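For reference, a made-up line in the format produced by amdgpu_bo_print_info() above could look like:

		0x00000001:      2097152 byte VRAM VISIBLE pin count 1 CPU_ACCESS_REQUIRED VRAM_CONTIGUOUS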
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 382485115b06..656b8a931dae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -30,8 +30,119 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
+#include "amdgpu_res_cursor.h"
+
+#ifdef CONFIG_MMU_NOTIFIER
+#include <linux/mmu_notifier.h>
+#endif
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
+#define AMDGPU_BO_MAX_PLACEMENTS 3
+
+/* BO flag to indicate a KFD userptr BO */
+#define AMDGPU_AMDKFD_CREATE_USERPTR_BO (1ULL << 63)
+
+#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
+#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)
+
+struct amdgpu_bo_param {
+ unsigned long size;
+ int byte_align;
+ u32 bo_ptr_size;
+ u32 domain;
+ u32 preferred_domain;
+ u64 flags;
+ enum ttm_bo_type type;
+ bool no_wait_gpu;
+ struct dma_resv *resv;
+ void (*destroy)(struct ttm_buffer_object *bo);
+ /* xcp partition number plus 1, 0 means any partition */
+ int8_t xcp_id_plus1;
+};
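A hedged sketch of filling this parameter block for amdgpu_bo_create(); the field values are illustrative, not taken from this patch:

	struct amdgpu_bo_param bp = {
		.size = PAGE_SIZE,
		.byte_align = PAGE_SIZE,
		.bo_ptr_size = sizeof(struct amdgpu_bo),
		.domain = AMDGPU_GEM_DOMAIN_GTT,
		.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC,
		.type = ttm_bo_type_kernel,
		.resv = NULL,
	};
	struct amdgpu_bo *bo;
	int r = amdgpu_bo_create(adev, &bp, &bo);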
+
+/* bo virtual addresses in a vm */
+struct amdgpu_bo_va_mapping {
+ struct amdgpu_bo_va *bo_va;
+ struct list_head list;
+ struct rb_node rb;
+ uint64_t start;
+ uint64_t last;
+ uint64_t __subtree_last;
+ uint64_t offset;
+ uint32_t flags;
+};
+
+/* User space allocated BO in a VM */
+struct amdgpu_bo_va {
+ struct amdgpu_vm_bo_base base;
+
+ /* protected by bo being reserved */
+ unsigned ref_count;
+
+ /* all other members protected by the VM PD being reserved */
+ struct dma_fence *last_pt_update;
+
+ /* mappings for this bo_va */
+ struct list_head invalids;
+ struct list_head valids;
+
+ /* If the mappings are cleared or filled */
+ bool cleared;
+
+ bool is_xgmi;
+
+ /*
+ * protected by vm reservation lock
+ * if non-zero, cannot unmap from GPU because user queues may still access it
+ */
+ unsigned int queue_refcount;
+};
+
+struct amdgpu_bo {
+ /* Protected by tbo.reserved */
+ u32 preferred_domains;
+ u32 allowed_domains;
+ struct ttm_place placements[AMDGPU_BO_MAX_PLACEMENTS];
+ struct ttm_placement placement;
+ struct ttm_buffer_object tbo;
+ struct ttm_bo_kmap_obj kmap;
+ u64 flags;
+ /* per VM structure for page tables and with virtual addresses */
+ struct amdgpu_vm_bo_base *vm_bo;
+ /* Constant after initialization */
+ struct amdgpu_bo *parent;
+
+#ifdef CONFIG_MMU_NOTIFIER
+ struct mmu_interval_notifier notifier;
+#endif
+ struct kgd_mem *kfd_bo;
+
+ /*
+ * For GPUs with spatial partitioning, xcp partition number, -1 means
+ * any partition. For other ASICs without spatial partition, always 0
+ * for memory accounting.
+ */
+ int8_t xcp_id;
+};
+
+struct amdgpu_bo_user {
+ struct amdgpu_bo bo;
+ u64 tiling_flags;
+ u64 metadata_flags;
+ void *metadata;
+ u32 metadata_size;
+
+};
+
+struct amdgpu_bo_vm {
+ struct amdgpu_bo bo;
+ struct amdgpu_vm_bo_base entries[];
+};
+
+static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
+{
+ return container_of(tbo, struct amdgpu_bo, tbo);
+}
/**
* amdgpu_mem_type_to_domain - return domain corresponding to mem_type
@@ -54,6 +165,10 @@ static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
return AMDGPU_GEM_DOMAIN_GWS;
case AMDGPU_PL_OA:
return AMDGPU_GEM_DOMAIN_OA;
+ case AMDGPU_PL_DOORBELL:
+ return AMDGPU_GEM_DOMAIN_DOORBELL;
+ case AMDGPU_PL_MMIO_REMAP:
+ return AMDGPU_GEM_DOMAIN_MMIO_REMAP;
default:
break;
}
@@ -90,17 +205,17 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
- return bo->tbo.num_pages << PAGE_SHIFT;
+ return bo->tbo.base.size;
}
static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
- return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+ return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}
static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
- return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+ return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}
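Illustrative arithmetic: for a 64 KiB BO with 4 KiB CPU pages and the usual 4 KiB AMDGPU_GPU_PAGE_SIZE, amdgpu_bo_size() returns 65536 and amdgpu_bo_ngpu_pages() returns 16.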
/**
@@ -111,51 +226,67 @@ static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
*/
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
- return drm_vma_node_offset_addr(&bo->tbo.vma_node);
+ return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
/**
- * amdgpu_bo_gpu_accessible - return whether the bo is currently in memory that
- * is accessible to the GPU.
+ * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
*/
-static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo)
+static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
- return bo->tbo.mem.mem_type != TTM_PL_SYSTEM;
+ return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
+/**
+ * amdgpu_bo_encrypted - test if the BO is encrypted
+ * @bo: pointer to a buffer object
+ *
+ * Return true if the buffer object is encrypted, false otherwise.
+ */
+static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
+{
+ return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
+}
+
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
+
int amdgpu_bo_create(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
- struct reservation_object *resv,
- struct amdgpu_bo **bo_ptr);
-int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
- struct ttm_placement *placement,
- struct reservation_object *resv,
- struct amdgpu_bo **bo_ptr);
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo **bo_ptr);
+int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
u64 *gpu_addr, void **cpu_addr);
+int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
+ struct dma_buf *dbuf, u32 domain,
+ struct amdgpu_bo **bo,
+ u64 *gpu_addr);
+int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size,
+ struct amdgpu_bo **bo_ptr, void **cpu_addr);
+int amdgpu_bo_create_user(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo_user **ubo_ptr);
+int amdgpu_bo_create_vm(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
void **cpu_addr);
+void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
+void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
-int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
-int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
- u64 min_offset, u64 max_offset,
- u64 *gpu_addr);
-int amdgpu_bo_unpin(struct amdgpu_bo *bo);
-int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
+int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
+void amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
-int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
- struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
@@ -165,37 +296,41 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
- struct ttm_mem_reg *new_mem);
-int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+ struct ttm_resource *new_mem);
+void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
+vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode, void *owner,
+ bool intr);
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
-int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence, bool direct);
-int amdgpu_bo_validate(struct amdgpu_bo *bo);
-int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct reservation_object *resv,
- struct dma_fence **fence,
- bool direct);
-
+u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo);
+u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
+uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo);
+uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
+ uint32_t domain);
/*
* sub allocation
*/
+static inline struct amdgpu_sa_manager *
+to_amdgpu_sa_manager(struct drm_suballoc_manager *manager)
+{
+ return container_of(manager, struct amdgpu_sa_manager, base);
+}
-static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
+static inline uint64_t amdgpu_sa_bo_gpu_addr(struct drm_suballoc *sa_bo)
{
- return sa_bo->manager->gpu_addr + sa_bo->soffset;
+ return to_amdgpu_sa_manager(sa_bo->manager)->gpu_addr +
+ drm_suballoc_soffset(sa_bo);
}
-static inline void * amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
+static inline void *amdgpu_sa_bo_cpu_addr(struct drm_suballoc *sa_bo)
{
- return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+ return to_amdgpu_sa_manager(sa_bo->manager)->cpu_ptr +
+ drm_suballoc_soffset(sa_bo);
}
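A hedged sketch of resolving a sub-allocation's addresses once amdgpu_sa_bo_new() succeeds; the sa_manager pointer, the fence and the 256-byte size are illustrative assumptions:

	struct drm_suballoc *sa_bo;

	if (!amdgpu_sa_bo_new(sa_manager, &sa_bo, 256)) {
		uint64_t gpu_addr = amdgpu_sa_bo_gpu_addr(sa_bo); /* manager base + offset */
		void *cpu_ptr = amdgpu_sa_bo_cpu_addr(sa_bo);
		/* ... use the sub-allocation, then release it against a fence ... */
		amdgpu_sa_bo_free(&sa_bo, fence);
	}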
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
@@ -205,18 +340,19 @@ void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager);
-int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
- struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_sa_bo **sa_bo,
- unsigned size, unsigned align);
-void amdgpu_sa_bo_free(struct amdgpu_device *adev,
- struct amdgpu_sa_bo **sa_bo,
- struct dma_fence *fence);
+ struct drm_suballoc **sa_bo,
+ unsigned int size);
+void amdgpu_sa_bo_free(struct drm_suballoc **sa_bo,
+ struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
+u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
+void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
+
+bool amdgpu_bo_support_uswc(u64 bo_flags);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
index 8e67c1210d7c..675aa138ea11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
@@ -20,7 +20,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
@@ -69,6 +69,7 @@ static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
/**
* amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation
*
+ * @adev: amdgpu_device pointer
 * @nom: numerator
* @den: denominator
* @post_div: post divider
@@ -80,15 +81,20 @@ static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
* Calculate feedback and reference divider for a given post divider. Makes
* sure we stay within the limits.
*/
-static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
- unsigned fb_div_max, unsigned ref_div_max,
- unsigned *fb_div, unsigned *ref_div)
+static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int nom,
+ unsigned int den, unsigned int post_div,
+ unsigned int fb_div_max, unsigned int ref_div_max,
+ unsigned int *fb_div, unsigned int *ref_div)
{
+
/* limit reference * post divider to a maximum */
- ref_div_max = min(128 / post_div, ref_div_max);
+ if (adev->family == AMDGPU_FAMILY_SI)
+ ref_div_max = min(100 / post_div, ref_div_max);
+ else
+ ref_div_max = min(128 / post_div, ref_div_max);
/* get matching reference and feedback divider */
- *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
+ *ref_div = clamp(DIV_ROUND_CLOSEST(den, post_div), 1u, ref_div_max);
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);
/* limit fb divider to its maximum */
@@ -101,17 +107,20 @@ static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_
/**
 * amdgpu_pll_compute - compute PLL parameters
*
+ * @adev: amdgpu_device pointer
* @pll: information about the PLL
+ * @freq: requested frequency
* @dot_clock_p: resulting pixel clock
- * fb_div_p: resulting feedback divider
- * frac_fb_div_p: fractional part of the feedback divider
- * ref_div_p: resulting reference divider
- * post_div_p: resulting reference divider
+ * @fb_div_p: resulting feedback divider
+ * @frac_fb_div_p: fractional part of the feedback divider
+ * @ref_div_p: resulting reference divider
+ * @post_div_p: resulting post divider
*
* Try to calculate the PLL parameters to generate the given frequency:
* dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
*/
-void amdgpu_pll_compute(struct amdgpu_pll *pll,
+void amdgpu_pll_compute(struct amdgpu_device *adev,
+ struct amdgpu_pll *pll,
u32 freq,
u32 *dot_clock_p,
u32 *fb_div_p,
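A worked example of the target relation dot_clock = (ref_freq * feedback_div) / (ref_div * post_div), with assumed values in the same 10 kHz units as the reference clock: ref_freq = 2700 (27 MHz), fb_div = 60, ref_div = 1 and post_div = 6 give (2700 * 60) / (1 * 6) = 27000, i.e. a 270 MHz dot clock.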
@@ -198,7 +207,7 @@ void amdgpu_pll_compute(struct amdgpu_pll *pll,
for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
unsigned diff;
- amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
+ amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max,
ref_div_max, &fb_div, &ref_div);
diff = abs(target_clock - (pll->reference_freq * fb_div) /
(ref_div * post_div));
@@ -213,7 +222,7 @@ void amdgpu_pll_compute(struct amdgpu_pll *pll,
post_div = post_div_best;
/* get the feedback and reference divider for the optimal value */
- amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
+ amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max, ref_div_max,
&fb_div, &ref_div);
/* reduce the numbers to a simpler ratio once more */
@@ -308,7 +317,6 @@ int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc)
* amdgpu_pll_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
*
* @crtc: drm crtc
- * @encoder: drm encoder
*
* Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
* be shared (i.e., same clock).
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h
index db6136f68b82..44a583d6c9b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h
@@ -24,7 +24,8 @@
#ifndef __AMDGPU_PLL_H__
#define __AMDGPU_PLL_H__
-void amdgpu_pll_compute(struct amdgpu_pll *pll,
+void amdgpu_pll_compute(struct amdgpu_device *adev,
+ struct amdgpu_pll *pll,
u32 freq,
u32 *dot_clock_p,
u32 *fb_div_p,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
deleted file mode 100644
index 7df503aedb69..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ /dev/null
@@ -1,1659 +0,0 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rafał Miłecki <zajec5@gmail.com>
- * Alex Deucher <alexdeucher@gmail.com>
- */
-#include <drm/drmP.h>
-#include "amdgpu.h"
-#include "amdgpu_drv.h"
-#include "amdgpu_pm.h"
-#include "amdgpu_dpm.h"
-#include "atom.h"
-#include <linux/power_supply.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-
-#include "amd_powerplay.h"
-
-static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
-
-static const struct cg_flag_name clocks[] = {
- {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
- {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
- {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
- {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
- {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
- {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
- {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
- {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
- {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
- {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
- {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
- {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
- {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
- {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
- {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
- {0, NULL},
-};
-
-void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
-{
- if (adev->pp_enabled)
- /* TODO */
- return;
-
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- if (power_supply_is_system_supplied() > 0)
- adev->pm.dpm.ac_power = true;
- else
- adev->pm.dpm.ac_power = false;
- if (adev->pm.funcs->enable_bapm)
- amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
- mutex_unlock(&adev->pm.mutex);
- }
-}
-
-static ssize_t amdgpu_get_dpm_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- enum amd_pm_state_type pm;
-
- if (adev->pp_enabled) {
- pm = amdgpu_dpm_get_current_power_state(adev);
- } else
- pm = adev->pm.dpm.user_state;
-
- return snprintf(buf, PAGE_SIZE, "%s\n",
- (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
- (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
-}
-
-static ssize_t amdgpu_set_dpm_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- enum amd_pm_state_type state;
-
- if (strncmp("battery", buf, strlen("battery")) == 0)
- state = POWER_STATE_TYPE_BATTERY;
- else if (strncmp("balanced", buf, strlen("balanced")) == 0)
- state = POWER_STATE_TYPE_BALANCED;
- else if (strncmp("performance", buf, strlen("performance")) == 0)
- state = POWER_STATE_TYPE_PERFORMANCE;
- else {
- count = -EINVAL;
- goto fail;
- }
-
- if (adev->pp_enabled) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
- } else {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.user_state = state;
- mutex_unlock(&adev->pm.mutex);
-
- /* Can't set dpm state when the card is off */
- if (!(adev->flags & AMD_IS_PX) ||
- (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
- amdgpu_pm_compute_clocks(adev);
- }
-fail:
- return count;
-}
-
-static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- enum amd_dpm_forced_level level;
-
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return snprintf(buf, PAGE_SIZE, "off\n");
-
- level = amdgpu_dpm_get_performance_level(adev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
- (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
- (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
- (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
- "unknown");
-}
-
-static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- enum amd_dpm_forced_level level;
- enum amd_dpm_forced_level current_level;
- int ret = 0;
-
- /* Can't force performance level when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-
- current_level = amdgpu_dpm_get_performance_level(adev);
-
- if (strncmp("low", buf, strlen("low")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_LOW;
- } else if (strncmp("high", buf, strlen("high")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_HIGH;
- } else if (strncmp("auto", buf, strlen("auto")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_AUTO;
- } else if (strncmp("manual", buf, strlen("manual")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_MANUAL;
- } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
- } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
- } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
- } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
- } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
- level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
- } else {
- count = -EINVAL;
- goto fail;
- }
-
- if (current_level == level)
- return count;
-
- if (adev->pp_enabled)
- amdgpu_dpm_force_performance_level(adev, level);
- else {
- mutex_lock(&adev->pm.mutex);
- if (adev->pm.dpm.thermal_active) {
- count = -EINVAL;
- mutex_unlock(&adev->pm.mutex);
- goto fail;
- }
- ret = amdgpu_dpm_force_performance_level(adev, level);
- if (ret)
- count = -EINVAL;
- else
- adev->pm.dpm.forced_level = level;
- mutex_unlock(&adev->pm.mutex);
- }
-
-fail:
- return count;
-}
-
-static ssize_t amdgpu_get_pp_num_states(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- struct pp_states_info data;
- int i, buf_len;
-
- if (adev->pp_enabled)
- amdgpu_dpm_get_pp_num_states(adev, &data);
-
- buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
- for (i = 0; i < data.nums; i++)
- buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
- (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
- (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
- (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
- (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
-
- return buf_len;
-}
-
-static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- struct pp_states_info data;
- enum amd_pm_state_type pm = 0;
- int i = 0;
-
- if (adev->pp_enabled) {
-
- pm = amdgpu_dpm_get_current_power_state(adev);
- amdgpu_dpm_get_pp_num_states(adev, &data);
-
- for (i = 0; i < data.nums; i++) {
- if (pm == data.states[i])
- break;
- }
-
- if (i == data.nums)
- i = -EINVAL;
- }
-
- return snprintf(buf, PAGE_SIZE, "%d\n", i);
-}
-
-static ssize_t amdgpu_get_pp_force_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- struct pp_states_info data;
- enum amd_pm_state_type pm = 0;
- int i;
-
- if (adev->pp_force_state_enabled && adev->pp_enabled) {
- pm = amdgpu_dpm_get_current_power_state(adev);
- amdgpu_dpm_get_pp_num_states(adev, &data);
-
- for (i = 0; i < data.nums; i++) {
- if (pm == data.states[i])
- break;
- }
-
- if (i == data.nums)
- i = -EINVAL;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", i);
-
- } else
- return snprintf(buf, PAGE_SIZE, "\n");
-}
-
-static ssize_t amdgpu_set_pp_force_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- enum amd_pm_state_type state = 0;
- unsigned long idx;
- int ret;
-
- if (strlen(buf) == 1)
- adev->pp_force_state_enabled = false;
- else if (adev->pp_enabled) {
- struct pp_states_info data;
-
- ret = kstrtoul(buf, 0, &idx);
- if (ret || idx >= ARRAY_SIZE(data.states)) {
- count = -EINVAL;
- goto fail;
- }
-
- amdgpu_dpm_get_pp_num_states(adev, &data);
- state = data.states[idx];
- /* only set user selected power states */
- if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
- state != POWER_STATE_TYPE_DEFAULT) {
- amdgpu_dpm_dispatch_task(adev,
- AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
- adev->pp_force_state_enabled = true;
- }
- }
-fail:
- return count;
-}
-
-static ssize_t amdgpu_get_pp_table(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- char *table = NULL;
- int size;
-
- if (adev->pp_enabled)
- size = amdgpu_dpm_get_pp_table(adev, &table);
- else
- return 0;
-
- if (size >= PAGE_SIZE)
- size = PAGE_SIZE - 1;
-
- memcpy(buf, table, size);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_table(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
-
- if (adev->pp_enabled)
- amdgpu_dpm_set_pp_table(adev, buf, count);
-
- return count;
-}
-
-static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size = 0;
-
- if (adev->pp_enabled)
- size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
- else if (adev->pm.funcs->print_clock_levels)
- size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- long level;
- uint32_t i, mask = 0;
- char sub_str[2];
-
- for (i = 0; i < strlen(buf); i++) {
- if (*(buf + i) == '\n')
- continue;
- sub_str[0] = *(buf + i);
- sub_str[1] = '\0';
- ret = kstrtol(sub_str, 0, &level);
-
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- mask |= 1 << level;
- }
-
- if (adev->pp_enabled)
- amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
- else if (adev->pm.funcs->force_clock_level)
- adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
-fail:
- return count;
-}
-
-static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size = 0;
-
- if (adev->pp_enabled)
- size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
- else if (adev->pm.funcs->print_clock_levels)
- size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- long level;
- uint32_t i, mask = 0;
- char sub_str[2];
-
- for (i = 0; i < strlen(buf); i++) {
- if (*(buf + i) == '\n')
- continue;
- sub_str[0] = *(buf + i);
- sub_str[1] = '\0';
- ret = kstrtol(sub_str, 0, &level);
-
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- mask |= 1 << level;
- }
-
- if (adev->pp_enabled)
- amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
- else if (adev->pm.funcs->force_clock_level)
- adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
-fail:
- return count;
-}
-
-static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size = 0;
-
- if (adev->pp_enabled)
- size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
- else if (adev->pm.funcs->print_clock_levels)
- size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
-
- return size;
-}
-
-static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- long level;
- uint32_t i, mask = 0;
- char sub_str[2];
-
- for (i = 0; i < strlen(buf); i++) {
- if (*(buf + i) == '\n')
- continue;
- sub_str[0] = *(buf + i);
- sub_str[1] = '\0';
- ret = kstrtol(sub_str, 0, &level);
-
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- mask |= 1 << level;
- }
-
- if (adev->pp_enabled)
- amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
- else if (adev->pm.funcs->force_clock_level)
- adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
-fail:
- return count;
-}
-
-static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- uint32_t value = 0;
-
- if (adev->pp_enabled)
- value = amdgpu_dpm_get_sclk_od(adev);
- else if (adev->pm.funcs->get_sclk_od)
- value = adev->pm.funcs->get_sclk_od(adev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
-}
-
-static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- long int value;
-
- ret = kstrtol(buf, 0, &value);
-
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
-
- if (adev->pp_enabled) {
- amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
- } else if (adev->pm.funcs->set_sclk_od) {
- adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
- }
-
-fail:
- return count;
-}
-
-static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- uint32_t value = 0;
-
- if (adev->pp_enabled)
- value = amdgpu_dpm_get_mclk_od(adev);
- else if (adev->pm.funcs->get_mclk_od)
- value = adev->pm.funcs->get_mclk_od(adev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
-}
-
-static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret;
- long int value;
-
- ret = kstrtol(buf, 0, &value);
-
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
-
- if (adev->pp_enabled) {
- amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
- } else if (adev->pm.funcs->set_mclk_od) {
- adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
- }
-
-fail:
- return count;
-}
-
-static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
- char *buf, struct amd_pp_profile *query)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- int ret = 0;
-
- if (adev->pp_enabled)
- ret = amdgpu_dpm_get_power_profile_state(
- adev, query);
- else if (adev->pm.funcs->get_power_profile_state)
- ret = adev->pm.funcs->get_power_profile_state(
- adev, query);
-
- if (ret)
- return ret;
-
- return snprintf(buf, PAGE_SIZE,
- "%d %d %d %d %d\n",
- query->min_sclk / 100,
- query->min_mclk / 100,
- query->activity_threshold,
- query->up_hyst,
- query->down_hyst);
-}
-
-static ssize_t amdgpu_get_pp_gfx_power_profile(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amd_pp_profile query = {0};
-
- query.type = AMD_PP_GFX_PROFILE;
-
- return amdgpu_get_pp_power_profile(dev, buf, &query);
-}
-
-static ssize_t amdgpu_get_pp_compute_power_profile(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amd_pp_profile query = {0};
-
- query.type = AMD_PP_COMPUTE_PROFILE;
-
- return amdgpu_get_pp_power_profile(dev, buf, &query);
-}
-
-static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
- const char *buf,
- size_t count,
- struct amd_pp_profile *request)
-{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct amdgpu_device *adev = ddev->dev_private;
- uint32_t loop = 0;
- char *sub_str, buf_cpy[128], *tmp_str;
- const char delimiter[3] = {' ', '\n', '\0'};
- long int value;
- int ret = 0;
-
- if (strncmp("reset", buf, strlen("reset")) == 0) {
- if (adev->pp_enabled)
- ret = amdgpu_dpm_reset_power_profile_state(
- adev, request);
- else if (adev->pm.funcs->reset_power_profile_state)
- ret = adev->pm.funcs->reset_power_profile_state(
- adev, request);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- return count;
- }
-
- if (strncmp("set", buf, strlen("set")) == 0) {
- if (adev->pp_enabled)
- ret = amdgpu_dpm_set_power_profile_state(
- adev, request);
- else if (adev->pm.funcs->set_power_profile_state)
- ret = adev->pm.funcs->set_power_profile_state(
- adev, request);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
- return count;
- }
-
- if (count + 1 >= 128) {
- count = -EINVAL;
- goto fail;
- }
-
- memcpy(buf_cpy, buf, count + 1);
- tmp_str = buf_cpy;
-
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
- ret = kstrtol(sub_str, 0, &value);
- if (ret) {
- count = -EINVAL;
- goto fail;
- }
-
- switch (loop) {
- case 0:
- /* input unit MHz convert to dpm table unit 10KHz*/
- request->min_sclk = (uint32_t)value * 100;
- break;
- case 1:
- /* input unit MHz convert to dpm table unit 10KHz*/
- request->min_mclk = (uint32_t)value * 100;
- break;
- case 2:
- request->activity_threshold = (uint16_t)value;
- break;
- case 3:
- request->up_hyst = (uint8_t)value;
- break;
- case 4:
- request->down_hyst = (uint8_t)value;
- break;
- default:
- break;
- }
-
- loop++;
- }
-
- if (adev->pp_enabled)
- ret = amdgpu_dpm_set_power_profile_state(
- adev, request);
- else if (adev->pm.funcs->set_power_profile_state)
- ret = adev->pm.funcs->set_power_profile_state(
- adev, request);
-
- if (ret)
- count = -EINVAL;
-
-fail:
- return count;
-}
-
-static ssize_t amdgpu_set_pp_gfx_power_profile(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct amd_pp_profile request = {0};
-
- request.type = AMD_PP_GFX_PROFILE;
-
- return amdgpu_set_pp_power_profile(dev, buf, count, &request);
-}
-
-static ssize_t amdgpu_set_pp_compute_power_profile(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct amd_pp_profile request = {0};
-
- request.type = AMD_PP_COMPUTE_PROFILE;
-
- return amdgpu_set_pp_power_profile(dev, buf, count, &request);
-}
-
-static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
-static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
- amdgpu_get_dpm_forced_performance_level,
- amdgpu_set_dpm_forced_performance_level);
-static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
-static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
-static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_force_state,
- amdgpu_set_pp_force_state);
-static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_table,
- amdgpu_set_pp_table);
-static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_sclk,
- amdgpu_set_pp_dpm_sclk);
-static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_mclk,
- amdgpu_set_pp_dpm_mclk);
-static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_pcie,
- amdgpu_set_pp_dpm_pcie);
-static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_sclk_od,
- amdgpu_set_pp_sclk_od);
-static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_mclk_od,
- amdgpu_set_pp_mclk_od);
-static DEVICE_ATTR(pp_gfx_power_profile, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_gfx_power_profile,
- amdgpu_set_pp_gfx_power_profile);
-static DEVICE_ATTR(pp_compute_power_profile, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_compute_power_profile,
- amdgpu_set_pp_compute_power_profile);
-
-static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- struct drm_device *ddev = adev->ddev;
- int temp;
-
- /* Can't get temperature when the card is off */
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-
- if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
- temp = 0;
- else
- temp = amdgpu_dpm_get_temperature(adev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
-}
-
-static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int hyst = to_sensor_dev_attr(attr)->index;
- int temp;
-
- if (hyst)
- temp = adev->pm.dpm.thermal.min_temp;
- else
- temp = adev->pm.dpm.thermal.max_temp;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", temp);
-}
-
-static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- u32 pwm_mode = 0;
-
- if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
- return -EINVAL;
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
-
- return sprintf(buf, "%i\n", pwm_mode);
-}
-
-static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- int value;
-
- if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
- return -EINVAL;
-
- err = kstrtoint(buf, 10, &value);
- if (err)
- return err;
-
- amdgpu_dpm_set_fan_control_mode(adev, value);
-
- return count;
-}
-
-static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%i\n", 0);
-}
-
-static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%i\n", 255);
-}
-
-static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- u32 value;
-
- err = kstrtou32(buf, 10, &value);
- if (err)
- return err;
-
- value = (value * 100) / 255;
-
- err = amdgpu_dpm_set_fan_speed_percent(adev, value);
- if (err)
- return err;
-
- return count;
-}
-
-static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- u32 speed;
-
- err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
- if (err)
- return err;
-
- speed = (speed * 255) / 100;
-
- return sprintf(buf, "%i\n", speed);
-}
-
-static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- int err;
- u32 speed;
-
- err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- if (err)
- return err;
-
- return sprintf(buf, "%i\n", speed);
-}
-
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
-static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
-static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
-static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
-
-static struct attribute *hwmon_attributes[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_pwm1_enable.dev_attr.attr,
- &sensor_dev_attr_pwm1_min.dev_attr.attr,
- &sensor_dev_attr_pwm1_max.dev_attr.attr,
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- NULL
-};
-
-static umode_t hwmon_attributes_visible(struct kobject *kobj,
- struct attribute *attr, int index)
-{
- struct device *dev = kobj_to_dev(kobj);
- struct amdgpu_device *adev = dev_get_drvdata(dev);
- umode_t effective_mode = attr->mode;
-
- /* Skip limit attributes if DPM is not enabled */
- if (!adev->pm.dpm_enabled &&
- (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
- attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
- return 0;
-
- if (adev->pp_enabled)
- return effective_mode;
-
- /* Skip fan attributes if fan is not present */
- if (adev->pm.no_fan &&
- (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
- return 0;
-
- /* mask fan attributes if we have no bindings for this asic to expose */
- if ((!adev->pm.funcs->get_fan_speed_percent &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
- (!adev->pm.funcs->get_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
- effective_mode &= ~S_IRUGO;
-
- if ((!adev->pm.funcs->set_fan_speed_percent &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
- (!adev->pm.funcs->set_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
- effective_mode &= ~S_IWUSR;
-
- /* hide max/min values if we can't both query and manage the fan */
- if ((!adev->pm.funcs->set_fan_speed_percent &&
- !adev->pm.funcs->get_fan_speed_percent) &&
- (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
- return 0;
-
- /* requires powerplay */
- if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr)
- return 0;
-
- return effective_mode;
-}
-
-static const struct attribute_group hwmon_attrgroup = {
- .attrs = hwmon_attributes,
- .is_visible = hwmon_attributes_visible,
-};
-
-static const struct attribute_group *hwmon_groups[] = {
- &hwmon_attrgroup,
- NULL
-};
-
-void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
-{
- struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device,
- pm.dpm.thermal.work);
- /* switch to the thermal state */
- enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
-
- if (!adev->pm.dpm_enabled)
- return;
-
- if (adev->pm.funcs->get_temperature) {
- int temp = amdgpu_dpm_get_temperature(adev);
-
- if (temp < adev->pm.dpm.thermal.min_temp)
- /* switch back the user state */
- dpm_state = adev->pm.dpm.user_state;
- } else {
- if (adev->pm.dpm.thermal.high_to_low)
- /* switch back the user state */
- dpm_state = adev->pm.dpm.user_state;
- }
- mutex_lock(&adev->pm.mutex);
- if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
- adev->pm.dpm.thermal_active = true;
- else
- adev->pm.dpm.thermal_active = false;
- adev->pm.dpm.state = dpm_state;
- mutex_unlock(&adev->pm.mutex);
-
- amdgpu_pm_compute_clocks(adev);
-}
-
-static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
- enum amd_pm_state_type dpm_state)
-{
- int i;
- struct amdgpu_ps *ps;
- u32 ui_class;
- bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
- true : false;
-
- /* check if the vblank period is too short to adjust the mclk */
- if (single_display && adev->pm.funcs->vblank_too_short) {
- if (amdgpu_dpm_vblank_too_short(adev))
- single_display = false;
- }
-
- /* certain older asics have a separare 3D performance state,
- * so try that first if the user selected performance
- */
- if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
- dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
- /* balanced states don't exist at the moment */
- if (dpm_state == POWER_STATE_TYPE_BALANCED)
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
-
-restart_search:
- /* Pick the best power state based on current conditions */
- for (i = 0; i < adev->pm.dpm.num_ps; i++) {
- ps = &adev->pm.dpm.ps[i];
- ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
- switch (dpm_state) {
- /* user states */
- case POWER_STATE_TYPE_BATTERY:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- case POWER_STATE_TYPE_BALANCED:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- case POWER_STATE_TYPE_PERFORMANCE:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- /* internal states */
- case POWER_STATE_TYPE_INTERNAL_UVD:
- if (adev->pm.dpm.uvd_ps)
- return adev->pm.dpm.uvd_ps;
- else
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_BOOT:
- return adev->pm.dpm.boot_ps;
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_ACPI:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_ULV:
- if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_3DPERF:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- return ps;
- break;
- default:
- break;
- }
- }
- /* use a fallback state if we didn't match */
- switch (dpm_state) {
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
- goto restart_search;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- if (adev->pm.dpm.uvd_ps) {
- return adev->pm.dpm.uvd_ps;
- } else {
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
- goto restart_search;
- }
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
- goto restart_search;
- case POWER_STATE_TYPE_INTERNAL_ACPI:
- dpm_state = POWER_STATE_TYPE_BATTERY;
- goto restart_search;
- case POWER_STATE_TYPE_BATTERY:
- case POWER_STATE_TYPE_BALANCED:
- case POWER_STATE_TYPE_INTERNAL_3DPERF:
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
- goto restart_search;
- default:
- break;
- }
-
- return NULL;
-}
-
-static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
-{
- struct amdgpu_ps *ps;
- enum amd_pm_state_type dpm_state;
- int ret;
- bool equal;
-
- /* if dpm init failed */
- if (!adev->pm.dpm_enabled)
- return;
-
- if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
- /* add other state override checks here */
- if ((!adev->pm.dpm.thermal_active) &&
- (!adev->pm.dpm.uvd_active))
- adev->pm.dpm.state = adev->pm.dpm.user_state;
- }
- dpm_state = adev->pm.dpm.state;
-
- ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
- if (ps)
- adev->pm.dpm.requested_ps = ps;
- else
- return;
-
- if (amdgpu_dpm == 1) {
- printk("switching from power state:\n");
- amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
- printk("switching to power state:\n");
- amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
- }
-
- /* update whether vce is active */
- ps->vce_active = adev->pm.dpm.vce_active;
-
- amdgpu_dpm_display_configuration_changed(adev);
-
- ret = amdgpu_dpm_pre_set_power_state(adev);
- if (ret)
- return;
-
- if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
- equal = false;
-
- if (equal)
- return;
-
- amdgpu_dpm_set_power_state(adev);
- amdgpu_dpm_post_set_power_state(adev);
-
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-
- if (adev->pm.funcs->force_performance_level) {
- if (adev->pm.dpm.thermal_active) {
- enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
- /* force low perf level for thermal */
- amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
- /* save the user's level */
- adev->pm.dpm.forced_level = level;
- } else {
- /* otherwise, user selected level */
- amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
- }
- }
-}
-
-void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
-{
- if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
- /* enable/disable UVD */
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_powergate_uvd(adev, !enable);
- mutex_unlock(&adev->pm.mutex);
- } else {
- if (enable) {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.uvd_active = true;
- adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
- mutex_unlock(&adev->pm.mutex);
- } else {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.uvd_active = false;
- mutex_unlock(&adev->pm.mutex);
- }
- amdgpu_pm_compute_clocks(adev);
- }
-}
-
-void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
-{
- if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
- /* enable/disable VCE */
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_powergate_vce(adev, !enable);
- mutex_unlock(&adev->pm.mutex);
- } else {
- if (enable) {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.vce_active = true;
- /* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
- mutex_unlock(&adev->pm.mutex);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
- amdgpu_pm_compute_clocks(adev);
- } else {
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.vce_active = false;
- mutex_unlock(&adev->pm.mutex);
- amdgpu_pm_compute_clocks(adev);
- }
-
- }
-}
-
-void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
-{
- int i;
-
- if (adev->pp_enabled)
- /* TO DO */
- return;
-
- for (i = 0; i < adev->pm.dpm.num_ps; i++)
- amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
-
-}
-
-int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
-{
- int ret;
-
- if (adev->pm.sysfs_initialized)
- return 0;
-
- if (!adev->pp_enabled) {
- if (adev->pm.funcs->get_temperature == NULL)
- return 0;
- }
-
- adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
- DRIVER_NAME, adev,
- hwmon_groups);
- if (IS_ERR(adev->pm.int_hwmon_dev)) {
- ret = PTR_ERR(adev->pm.int_hwmon_dev);
- dev_err(adev->dev,
- "Unable to register hwmon device: %d\n", ret);
- return ret;
- }
-
- ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
- if (ret) {
- DRM_ERROR("failed to create device file for dpm state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
- if (ret) {
- DRM_ERROR("failed to create device file for dpm state\n");
- return ret;
- }
-
- if (adev->pp_enabled) {
- ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
- if (ret) {
- DRM_ERROR("failed to create device file pp_num_states\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
- if (ret) {
- DRM_ERROR("failed to create device file pp_cur_state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
- if (ret) {
- DRM_ERROR("failed to create device file pp_force_state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_table);
- if (ret) {
- DRM_ERROR("failed to create device file pp_table\n");
- return ret;
- }
- }
-
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_sclk\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_mclk\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_pcie\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
- if (ret) {
- DRM_ERROR("failed to create device file pp_sclk_od\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
- if (ret) {
- DRM_ERROR("failed to create device file pp_mclk_od\n");
- return ret;
- }
- ret = device_create_file(adev->dev,
- &dev_attr_pp_gfx_power_profile);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "pp_gfx_power_profile\n");
- return ret;
- }
- ret = device_create_file(adev->dev,
- &dev_attr_pp_compute_power_profile);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "pp_compute_power_profile\n");
- return ret;
- }
-
- ret = amdgpu_debugfs_pm_init(adev);
- if (ret) {
- DRM_ERROR("Failed to register debugfs file for dpm!\n");
- return ret;
- }
-
- adev->pm.sysfs_initialized = true;
-
- return 0;
-}
-
-void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
-{
- if (adev->pm.int_hwmon_dev)
- hwmon_device_unregister(adev->pm.int_hwmon_dev);
- device_remove_file(adev->dev, &dev_attr_power_dpm_state);
- device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
- if (adev->pp_enabled) {
- device_remove_file(adev->dev, &dev_attr_pp_num_states);
- device_remove_file(adev->dev, &dev_attr_pp_cur_state);
- device_remove_file(adev->dev, &dev_attr_pp_force_state);
- device_remove_file(adev->dev, &dev_attr_pp_table);
- }
- device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
- device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
- device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
- device_remove_file(adev->dev,
- &dev_attr_pp_gfx_power_profile);
- device_remove_file(adev->dev,
- &dev_attr_pp_compute_power_profile);
-}
-
-void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
-{
- struct drm_device *ddev = adev->ddev;
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- int i = 0;
-
- if (!adev->pm.dpm_enabled)
- return;
-
- if (adev->mode_info.num_crtc)
- amdgpu_display_bandwidth_update(adev);
-
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->ready)
- amdgpu_fence_wait_empty(ring);
- }
-
- if (adev->pp_enabled) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
- } else {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.new_active_crtcs = 0;
- adev->pm.dpm.new_active_crtc_count = 0;
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled) {
- adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
- adev->pm.dpm.new_active_crtc_count++;
- }
- }
- }
- /* update battery/ac status */
- if (power_supply_is_system_supplied() > 0)
- adev->pm.dpm.ac_power = true;
- else
- adev->pm.dpm.ac_power = false;
-
- amdgpu_dpm_change_power_state_locked(adev);
-
- mutex_unlock(&adev->pm.mutex);
- }
-}
-
-/*
- * Debugfs info
- */
-#if defined(CONFIG_DEBUG_FS)
-
-static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
-{
- uint32_t value;
- struct pp_gpu_power query = {0};
- int size;
-
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
- /* GPU Clocks */
- size = sizeof(value);
- seq_printf(m, "GFX Clocks and Power:\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
- seq_printf(m, "\t%u mV (VDDGFX)\n", value);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
- seq_printf(m, "\t%u mV (VDDNB)\n", value);
- size = sizeof(query);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) {
- seq_printf(m, "\t%u.%u W (VDDC)\n", query.vddc_power >> 8,
- query.vddc_power & 0xff);
- seq_printf(m, "\t%u.%u W (VDDCI)\n", query.vddci_power >> 8,
- query.vddci_power & 0xff);
- seq_printf(m, "\t%u.%u W (max GPU)\n", query.max_gpu_power >> 8,
- query.max_gpu_power & 0xff);
- seq_printf(m, "\t%u.%u W (average GPU)\n", query.average_gpu_power >> 8,
- query.average_gpu_power & 0xff);
- }
- size = sizeof(value);
- seq_printf(m, "\n");
-
- /* GPU Temp */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
- seq_printf(m, "GPU Temperature: %u C\n", value/1000);
-
- /* GPU Load */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
- seq_printf(m, "GPU Load: %u %%\n", value);
- seq_printf(m, "\n");
-
- /* UVD clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
- if (!value) {
- seq_printf(m, "UVD: Disabled\n");
- } else {
- seq_printf(m, "UVD: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
- }
- }
- seq_printf(m, "\n");
-
- /* VCE clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
- if (!value) {
- seq_printf(m, "VCE: Disabled\n");
- } else {
- seq_printf(m, "VCE: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
- }
- }
-
- return 0;
-}
-
-static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
-{
- int i;
-
- for (i = 0; clocks[i].flag; i++)
- seq_printf(m, "\t%s: %s\n", clocks[i].name,
- (flags & clocks[i].flag) ? "On" : "Off");
-}
-
-static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct drm_device *ddev = adev->ddev;
- u32 flags = 0;
-
- amdgpu_get_clockgating_state(adev, &flags);
- seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
- amdgpu_parse_cg_state(m, flags);
- seq_printf(m, "\n");
-
- if (!adev->pm.dpm_enabled) {
- seq_printf(m, "dpm not enabled\n");
- return 0;
- }
- if ((adev->flags & AMD_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
- seq_printf(m, "PX asic powered off\n");
- } else if (adev->pp_enabled) {
- return amdgpu_debugfs_pm_info_pp(m, adev);
- } else {
- mutex_lock(&adev->pm.mutex);
- if (adev->pm.funcs->debugfs_print_current_performance_level)
- adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
- else
- seq_printf(m, "Debugfs support not implemented for this asic\n");
- mutex_unlock(&adev->pm.mutex);
- }
-
- return 0;
-}
-
-static const struct drm_info_list amdgpu_pm_info_list[] = {
- {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
-};
-#endif
-
-static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
- return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
-#else
- return 0;
-#endif
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
new file mode 100644
index 000000000000..6e91ea1de5aa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -0,0 +1,680 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/perf_event.h>
+#include <linux/init.h>
+#include "amdgpu.h"
+#include "amdgpu_pmu.h"
+
+#define PMU_NAME_SIZE 32
+#define NUM_FORMATS_AMDGPU_PMU 4
+#define NUM_FORMATS_DF_VEGA20 3
+#define NUM_EVENTS_DF_VEGA20 8
+#define NUM_EVENT_TYPES_VEGA20 1
+#define NUM_EVENTS_VEGA20_XGMI 2
+#define NUM_EVENTS_VEGA20_MAX NUM_EVENTS_VEGA20_XGMI
+#define NUM_EVENT_TYPES_ARCTURUS 1
+#define NUM_EVENTS_ARCTURUS_XGMI 6
+#define NUM_EVENTS_ARCTURUS_MAX NUM_EVENTS_ARCTURUS_XGMI
+
+struct amdgpu_pmu_event_attribute {
+ struct device_attribute attr;
+ const char *event_str;
+ unsigned int type;
+};
+
+/* record to keep track of pmu entry per pmu type per device */
+struct amdgpu_pmu_entry {
+ struct list_head entry;
+ struct amdgpu_device *adev;
+ struct pmu pmu;
+ unsigned int pmu_perf_type;
+ char *pmu_type_name;
+ char *pmu_file_prefix;
+ struct attribute_group fmt_attr_group;
+ struct amdgpu_pmu_event_attribute *fmt_attr;
+ struct attribute_group evt_attr_group;
+ struct amdgpu_pmu_event_attribute *evt_attr;
+};
+
+static ssize_t amdgpu_pmu_event_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct amdgpu_pmu_event_attribute *amdgpu_pmu_attr;
+
+ amdgpu_pmu_attr = container_of(attr, struct amdgpu_pmu_event_attribute,
+ attr);
+
+ if (!amdgpu_pmu_attr->type)
+ return sprintf(buf, "%s\n", amdgpu_pmu_attr->event_str);
+
+ return sprintf(buf, "%s,type=0x%x\n",
+ amdgpu_pmu_attr->event_str, amdgpu_pmu_attr->type);
+}
+
+static LIST_HEAD(amdgpu_pmu_list);
+
+
+struct amdgpu_pmu_attr {
+ const char *name;
+ const char *config;
+};
+
+struct amdgpu_pmu_type {
+ const unsigned int type;
+ const unsigned int num_of_type;
+};
+
+struct amdgpu_pmu_config {
+ struct amdgpu_pmu_attr *formats;
+ unsigned int num_formats;
+ struct amdgpu_pmu_attr *events;
+ unsigned int num_events;
+ struct amdgpu_pmu_type *types;
+ unsigned int num_types;
+};
+
+/*
+ * Events fall under two categories:
+ * - PMU typed
+ * Events in /sys/bus/event_source/devices/amdgpu_<pmu_type>_<dev_num> have
+ * performance counter operations handled by one IP <pmu_type>. Formats and
+ * events should be defined by <pmu_type>_<asic_type>_formats and
+ * <pmu_type>_<asic_type>_events respectively.
+ *
+ * - Event config typed
+ * Events in /sys/bus/event_source/devices/amdgpu_<dev_num> have performance
+ * counter operations that can be handled by multiple IPs dictated by their
+ * "type" format field. Formats and events should be defined by
+ * amdgpu_pmu_formats and <asic_type>_events respectively. Format field
+ * "type" is generated in amdgpu_pmu_event_show and defined in
+ * <asic_type>_event_config_types.
+ */
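The following is an illustrative userspace sketch (not part of this patch) showing how the format fields above compose into a raw perf config for an event-config-typed PMU: event occupies config bits 0-7, instance bits 8-15, umask bits 16-23 and the config "type" bits 56-63. It assumes the Vega20 xgmi_link0_data_outbound encoding defined below and that AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI equals 2 (per the enum in amdgpu_pmu.h); the dynamic PMU type must still be read from /sys/bus/event_source/devices/amdgpu_<dev_num>/type, and the helper names are made up for the example.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Pack a raw config according to the amdgpu_pmu_formats bit ranges above. */
static uint64_t amdgpu_raw_config(uint64_t event, uint64_t instance,
				  uint64_t umask, uint64_t type)
{
	return (event & 0xff) | ((instance & 0xff) << 8) |
	       ((umask & 0xff) << 16) | ((type & 0xff) << 56);
}

/* Open xgmi_link0_data_outbound on the amdgpu_<dev_num> PMU. */
static int open_xgmi_link0(uint32_t pmu_type /* from the sysfs "type" file */)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = pmu_type;
	/* event=0x7, instance=0x46, umask=0x2, type=XGMI (assumed 2) */
	attr.config = amdgpu_raw_config(0x7, 0x46, 0x2, 2);

	/* uncore-style counter: no task context, counted on one CPU */
	return (int)syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}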
+
+static struct amdgpu_pmu_attr amdgpu_pmu_formats[NUM_FORMATS_AMDGPU_PMU] = {
+ { .name = "event", .config = "config:0-7" },
+ { .name = "instance", .config = "config:8-15" },
+ { .name = "umask", .config = "config:16-23"},
+ { .name = "type", .config = "config:56-63"}
+};
+
+/* Vega20 events */
+static struct amdgpu_pmu_attr vega20_events[NUM_EVENTS_VEGA20_MAX] = {
+ { .name = "xgmi_link0_data_outbound",
+ .config = "event=0x7,instance=0x46,umask=0x2" },
+ { .name = "xgmi_link1_data_outbound",
+ .config = "event=0x7,instance=0x47,umask=0x2" }
+};
+
+static struct amdgpu_pmu_type vega20_types[NUM_EVENT_TYPES_VEGA20] = {
+ { .type = AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI,
+ .num_of_type = NUM_EVENTS_VEGA20_XGMI }
+};
+
+static struct amdgpu_pmu_config vega20_config = {
+ .formats = amdgpu_pmu_formats,
+ .num_formats = ARRAY_SIZE(amdgpu_pmu_formats),
+ .events = vega20_events,
+ .num_events = ARRAY_SIZE(vega20_events),
+ .types = vega20_types,
+ .num_types = ARRAY_SIZE(vega20_types)
+};
+
+/* Vega20 data fabric (DF) events */
+static struct amdgpu_pmu_attr df_vega20_formats[NUM_FORMATS_DF_VEGA20] = {
+ { .name = "event", .config = "config:0-7" },
+ { .name = "instance", .config = "config:8-15" },
+ { .name = "umask", .config = "config:16-23"}
+};
+
+static struct amdgpu_pmu_attr df_vega20_events[NUM_EVENTS_DF_VEGA20] = {
+ { .name = "cake0_pcsout_txdata",
+ .config = "event=0x7,instance=0x46,umask=0x2" },
+ { .name = "cake1_pcsout_txdata",
+ .config = "event=0x7,instance=0x47,umask=0x2" },
+ { .name = "cake0_pcsout_txmeta",
+ .config = "event=0x7,instance=0x46,umask=0x4" },
+ { .name = "cake1_pcsout_txmeta",
+ .config = "event=0x7,instance=0x47,umask=0x4" },
+ { .name = "cake0_ftiinstat_reqalloc",
+ .config = "event=0xb,instance=0x46,umask=0x4" },
+ { .name = "cake1_ftiinstat_reqalloc",
+ .config = "event=0xb,instance=0x47,umask=0x4" },
+ { .name = "cake0_ftiinstat_rspalloc",
+ .config = "event=0xb,instance=0x46,umask=0x8" },
+ { .name = "cake1_ftiinstat_rspalloc",
+ .config = "event=0xb,instance=0x47,umask=0x8" }
+};
+
+static struct amdgpu_pmu_config df_vega20_config = {
+ .formats = df_vega20_formats,
+ .num_formats = ARRAY_SIZE(df_vega20_formats),
+ .events = df_vega20_events,
+ .num_events = ARRAY_SIZE(df_vega20_events),
+ .types = NULL,
+ .num_types = 0
+};
+
+/* Arcturus events */
+static struct amdgpu_pmu_attr arcturus_events[NUM_EVENTS_ARCTURUS_MAX] = {
+ { .name = "xgmi_link0_data_outbound",
+ .config = "event=0x7,instance=0x4b,umask=0x2" },
+ { .name = "xgmi_link1_data_outbound",
+ .config = "event=0x7,instance=0x4c,umask=0x2" },
+ { .name = "xgmi_link2_data_outbound",
+ .config = "event=0x7,instance=0x4d,umask=0x2" },
+ { .name = "xgmi_link3_data_outbound",
+ .config = "event=0x7,instance=0x4e,umask=0x2" },
+ { .name = "xgmi_link4_data_outbound",
+ .config = "event=0x7,instance=0x4f,umask=0x2" },
+ { .name = "xgmi_link5_data_outbound",
+ .config = "event=0x7,instance=0x50,umask=0x2" }
+};
+
+static struct amdgpu_pmu_type arcturus_types[NUM_EVENT_TYPES_ARCTURUS] = {
+ { .type = AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI,
+ .num_of_type = NUM_EVENTS_ARCTURUS_XGMI }
+};
+
+static struct amdgpu_pmu_config arcturus_config = {
+ .formats = amdgpu_pmu_formats,
+ .num_formats = ARRAY_SIZE(amdgpu_pmu_formats),
+ .events = arcturus_events,
+ .num_events = ARRAY_SIZE(arcturus_events),
+ .types = arcturus_types,
+ .num_types = ARRAY_SIZE(arcturus_types)
+};
+
+/* initialize perf counter */
+static int amdgpu_perf_event_init(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* check the event attr type against this PMU during PMU enumeration */
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* update the hw_perf_event struct with config data */
+ hwc->config = event->attr.config;
+ hwc->config_base = AMDGPU_PMU_PERF_TYPE_NONE;
+
+ return 0;
+}
+
+/* start perf counter */
+static void amdgpu_perf_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct amdgpu_pmu_entry *pe = container_of(event->pmu,
+ struct amdgpu_pmu_entry,
+ pmu);
+ int target_cntr = 0;
+
+ if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+ return;
+
+ if ((!pe->adev->df.funcs) ||
+ (!pe->adev->df.funcs->pmc_start))
+ return;
+
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+ hwc->state = 0;
+
+ switch (hwc->config_base) {
+ case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
+ case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
+ if (!(flags & PERF_EF_RELOAD)) {
+ target_cntr = pe->adev->df.funcs->pmc_start(pe->adev,
+ hwc->config, 0 /* unused */,
+ 1 /* add counter */);
+ if (target_cntr < 0)
+ break;
+
+ hwc->idx = target_cntr;
+ }
+
+ pe->adev->df.funcs->pmc_start(pe->adev, hwc->config,
+ hwc->idx, 0);
+ break;
+ default:
+ break;
+ }
+
+ perf_event_update_userpage(event);
+}
+
+/* read perf counter */
+static void amdgpu_perf_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct amdgpu_pmu_entry *pe = container_of(event->pmu,
+ struct amdgpu_pmu_entry,
+ pmu);
+ u64 count, prev;
+
+ if ((!pe->adev->df.funcs) ||
+ (!pe->adev->df.funcs->pmc_get_count))
+ return;
+
+ prev = local64_read(&hwc->prev_count);
+ do {
+ switch (hwc->config_base) {
+ case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
+ case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
+ pe->adev->df.funcs->pmc_get_count(pe->adev,
+ hwc->config, hwc->idx, &count);
+ break;
+ default:
+ count = 0;
+ break;
+ }
+ } while (!local64_try_cmpxchg(&hwc->prev_count, &prev, count));
+
+ local64_add(count - prev, &event->count);
+}
+
+/* stop perf counter */
+static void amdgpu_perf_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct amdgpu_pmu_entry *pe = container_of(event->pmu,
+ struct amdgpu_pmu_entry,
+ pmu);
+
+ if (hwc->state & PERF_HES_UPTODATE)
+ return;
+
+ if ((!pe->adev->df.funcs) ||
+ (!pe->adev->df.funcs->pmc_stop))
+ return;
+
+ switch (hwc->config_base) {
+ case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
+ case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
+ pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx,
+ 0);
+ break;
+ default:
+ break;
+ }
+
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+
+ if (hwc->state & PERF_HES_UPTODATE)
+ return;
+
+ amdgpu_perf_read(event);
+ hwc->state |= PERF_HES_UPTODATE;
+}
+
+/* add perf counter */
+static int amdgpu_perf_add(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int retval = 0, target_cntr;
+ struct amdgpu_pmu_entry *pe = container_of(event->pmu,
+ struct amdgpu_pmu_entry,
+ pmu);
+
+ if ((!pe->adev->df.funcs) ||
+ (!pe->adev->df.funcs->pmc_start))
+ return -EINVAL;
+
+ switch (pe->pmu_perf_type) {
+ case AMDGPU_PMU_PERF_TYPE_DF:
+ hwc->config_base = AMDGPU_PMU_EVENT_CONFIG_TYPE_DF;
+ break;
+ case AMDGPU_PMU_PERF_TYPE_ALL:
+ hwc->config_base = (hwc->config >>
+ AMDGPU_PMU_EVENT_CONFIG_TYPE_SHIFT) &
+ AMDGPU_PMU_EVENT_CONFIG_TYPE_MASK;
+ break;
+ }
+
+ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ switch (hwc->config_base) {
+ case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
+ case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
+ target_cntr = pe->adev->df.funcs->pmc_start(pe->adev,
+ hwc->config, 0 /* unused */,
+ 1 /* add counter */);
+ if (target_cntr < 0)
+ retval = target_cntr;
+ else
+ hwc->idx = target_cntr;
+
+ break;
+ default:
+ return 0;
+ }
+
+ if (retval)
+ return retval;
+
+ if (flags & PERF_EF_START)
+ amdgpu_perf_start(event, PERF_EF_RELOAD);
+
+ return retval;
+}
+
+/* delete perf counter */
+static void amdgpu_perf_del(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct amdgpu_pmu_entry *pe = container_of(event->pmu,
+ struct amdgpu_pmu_entry,
+ pmu);
+ if ((!pe->adev->df.funcs) ||
+ (!pe->adev->df.funcs->pmc_stop))
+ return;
+
+ amdgpu_perf_stop(event, PERF_EF_UPDATE);
+
+ switch (hwc->config_base) {
+ case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
+ case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
+ pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx,
+ 1);
+ break;
+ default:
+ break;
+ }
+
+ perf_event_update_userpage(event);
+}
+
+static void amdgpu_pmu_create_event_attrs_by_type(
+ struct attribute_group *attr_group,
+ struct amdgpu_pmu_event_attribute *pmu_attr,
+ struct amdgpu_pmu_attr events[],
+ int s_offset,
+ int e_offset,
+ unsigned int type)
+{
+ int i;
+
+ pmu_attr += s_offset;
+
+ for (i = s_offset; i < e_offset; i++) {
+ attr_group->attrs[i] = &pmu_attr->attr.attr;
+ sysfs_attr_init(&pmu_attr->attr.attr);
+ pmu_attr->attr.attr.name = events[i].name;
+ pmu_attr->attr.attr.mode = 0444;
+ pmu_attr->attr.show = amdgpu_pmu_event_show;
+ pmu_attr->event_str = events[i].config;
+ pmu_attr->type = type;
+ pmu_attr++;
+ }
+}
+
+static void amdgpu_pmu_create_attrs(struct attribute_group *attr_group,
+ struct amdgpu_pmu_event_attribute *pmu_attr,
+ struct amdgpu_pmu_attr events[],
+ int num_events)
+{
+ amdgpu_pmu_create_event_attrs_by_type(attr_group, pmu_attr, events, 0,
+ num_events, AMDGPU_PMU_EVENT_CONFIG_TYPE_NONE);
+}
+
+
+static int amdgpu_pmu_alloc_pmu_attrs(
+ struct attribute_group *fmt_attr_group,
+ struct amdgpu_pmu_event_attribute **fmt_attr,
+ struct attribute_group *evt_attr_group,
+ struct amdgpu_pmu_event_attribute **evt_attr,
+ struct amdgpu_pmu_config *config)
+{
+ *fmt_attr = kcalloc(config->num_formats, sizeof(**fmt_attr),
+ GFP_KERNEL);
+
+ if (!(*fmt_attr))
+ return -ENOMEM;
+
+ fmt_attr_group->attrs = kcalloc(config->num_formats + 1,
+ sizeof(*fmt_attr_group->attrs), GFP_KERNEL);
+
+ if (!fmt_attr_group->attrs)
+ goto err_fmt_attr_grp;
+
+ *evt_attr = kcalloc(config->num_events, sizeof(**evt_attr), GFP_KERNEL);
+
+ if (!(*evt_attr))
+ goto err_evt_attr;
+
+ evt_attr_group->attrs = kcalloc(config->num_events + 1,
+ sizeof(*evt_attr_group->attrs), GFP_KERNEL);
+
+ if (!evt_attr_group->attrs)
+ goto err_evt_attr_grp;
+
+ return 0;
+err_evt_attr_grp:
+ kfree(*evt_attr);
+err_evt_attr:
+ kfree(fmt_attr_group->attrs);
+err_fmt_attr_grp:
+ kfree(*fmt_attr);
+ return -ENOMEM;
+}
+
+/* init pmu tracking per pmu type */
+static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry,
+ struct amdgpu_pmu_config *config)
+{
+ const struct attribute_group *attr_groups[] = {
+ &pmu_entry->fmt_attr_group,
+ &pmu_entry->evt_attr_group,
+ NULL
+ };
+ char pmu_name[PMU_NAME_SIZE];
+ int ret = 0, total_num_events = 0;
+
+ pmu_entry->pmu = (struct pmu){
+ .event_init = amdgpu_perf_event_init,
+ .add = amdgpu_perf_add,
+ .del = amdgpu_perf_del,
+ .start = amdgpu_perf_start,
+ .stop = amdgpu_perf_stop,
+ .read = amdgpu_perf_read,
+ .task_ctx_nr = perf_invalid_context,
+ };
+
+ ret = amdgpu_pmu_alloc_pmu_attrs(&pmu_entry->fmt_attr_group,
+ &pmu_entry->fmt_attr,
+ &pmu_entry->evt_attr_group,
+ &pmu_entry->evt_attr,
+ config);
+
+ if (ret)
+ goto err_out;
+
+ amdgpu_pmu_create_attrs(&pmu_entry->fmt_attr_group, pmu_entry->fmt_attr,
+ config->formats, config->num_formats);
+
+ if (pmu_entry->pmu_perf_type == AMDGPU_PMU_PERF_TYPE_ALL) {
+ int i;
+
+ for (i = 0; i < config->num_types; i++) {
+ amdgpu_pmu_create_event_attrs_by_type(
+ &pmu_entry->evt_attr_group,
+ pmu_entry->evt_attr,
+ config->events,
+ total_num_events,
+ total_num_events +
+ config->types[i].num_of_type,
+ config->types[i].type);
+ total_num_events += config->types[i].num_of_type;
+ }
+ } else {
+ amdgpu_pmu_create_attrs(&pmu_entry->evt_attr_group,
+ pmu_entry->evt_attr,
+ config->events, config->num_events);
+ total_num_events = config->num_events;
+ }
+
+ pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
+ GFP_KERNEL);
+
+ if (!pmu_entry->pmu.attr_groups) {
+ ret = -ENOMEM;
+ goto err_attr_group;
+ }
+
+ snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix,
+ adev_to_drm(pmu_entry->adev)->primary->index);
+
+ ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);
+
+ if (ret)
+ goto err_register;
+
+ if (pmu_entry->pmu_perf_type != AMDGPU_PMU_PERF_TYPE_ALL)
+ pr_info("Detected AMDGPU %s Counters. # of Counters = %d.\n",
+ pmu_entry->pmu_type_name, total_num_events);
+ else
+ pr_info("Detected AMDGPU %d Perf Events.\n", total_num_events);
+
+
+ list_add_tail(&pmu_entry->entry, &amdgpu_pmu_list);
+
+ return 0;
+err_register:
+ kfree(pmu_entry->pmu.attr_groups);
+err_attr_group:
+ kfree(pmu_entry->fmt_attr_group.attrs);
+ kfree(pmu_entry->fmt_attr);
+ kfree(pmu_entry->evt_attr_group.attrs);
+ kfree(pmu_entry->evt_attr);
+err_out:
+ pr_warn("Error initializing AMDGPU %s PMUs.\n",
+ pmu_entry->pmu_type_name);
+ return ret;
+}
+
+/* destroy all pmu data associated with target device */
+void amdgpu_pmu_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_pmu_entry *pe, *temp;
+
+ list_for_each_entry_safe(pe, temp, &amdgpu_pmu_list, entry) {
+ if (pe->adev != adev)
+ continue;
+ list_del(&pe->entry);
+ perf_pmu_unregister(&pe->pmu);
+ kfree(pe->pmu.attr_groups);
+ kfree(pe->fmt_attr_group.attrs);
+ kfree(pe->fmt_attr);
+ kfree(pe->evt_attr_group.attrs);
+ kfree(pe->evt_attr);
+ kfree(pe);
+ }
+}
+
+static struct amdgpu_pmu_entry *create_pmu_entry(struct amdgpu_device *adev,
+ unsigned int pmu_type,
+ char *pmu_type_name,
+ char *pmu_file_prefix)
+{
+ struct amdgpu_pmu_entry *pmu_entry;
+
+ pmu_entry = kzalloc(sizeof(struct amdgpu_pmu_entry), GFP_KERNEL);
+
+ if (!pmu_entry)
+ return pmu_entry;
+
+ pmu_entry->adev = adev;
+ pmu_entry->fmt_attr_group.name = "format";
+ pmu_entry->fmt_attr_group.attrs = NULL;
+ pmu_entry->evt_attr_group.name = "events";
+ pmu_entry->evt_attr_group.attrs = NULL;
+ pmu_entry->pmu_perf_type = pmu_type;
+ pmu_entry->pmu_type_name = pmu_type_name;
+ pmu_entry->pmu_file_prefix = pmu_file_prefix;
+
+ return pmu_entry;
+}
+
+/* init amdgpu_pmu */
+int amdgpu_pmu_init(struct amdgpu_device *adev)
+{
+ int ret = 0;
+ struct amdgpu_pmu_entry *pmu_entry, *pmu_entry_df;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ pmu_entry_df = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_DF,
+ "DF", "amdgpu_df");
+
+ if (!pmu_entry_df)
+ return -ENOMEM;
+
+ ret = init_pmu_entry_by_type_and_add(pmu_entry_df,
+ &df_vega20_config);
+
+ if (ret) {
+ kfree(pmu_entry_df);
+ return ret;
+ }
+
+ pmu_entry = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_ALL,
+ "", "amdgpu");
+
+ if (!pmu_entry) {
+ amdgpu_pmu_fini(adev);
+ return -ENOMEM;
+ }
+
+ ret = init_pmu_entry_by_type_and_add(pmu_entry,
+ &vega20_config);
+
+ if (ret) {
+ kfree(pmu_entry);
+ amdgpu_pmu_fini(adev);
+ return ret;
+ }
+
+ break;
+ case CHIP_ARCTURUS:
+ pmu_entry = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_ALL,
+ "", "amdgpu");
+ if (!pmu_entry)
+ return -ENOMEM;
+
+ ret = init_pmu_entry_by_type_and_add(pmu_entry,
+ &arcturus_config);
+
+ if (ret) {
+ kfree(pmu_entry);
+ return -ENOMEM;
+ }
+
+ break;
+
+ default:
+ return 0;
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h
new file mode 100644
index 000000000000..6882dc48c5d8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AMDGPU_PMU_H_
+#define _AMDGPU_PMU_H_
+
+/* PMU types. */
+enum amdgpu_pmu_perf_type {
+ AMDGPU_PMU_PERF_TYPE_NONE = 0,
+ AMDGPU_PMU_PERF_TYPE_DF,
+ AMDGPU_PMU_PERF_TYPE_ALL
+};
+
+/*
+ * PMU type AMDGPU_PMU_PERF_TYPE_ALL can hold events of different "type"
+ * configurations. Event config types are parsed from the 64-bit raw
+ * config (See EVENT_CONFIG_TYPE_SHIFT and EVENT_CONFIG_TYPE_MASK) and
+ * are registered into the HW perf events config_base.
+ *
+ * PMU types with only a single event configuration type
+ * (non-AMDGPU_PMU_PERF_TYPE_ALL) have their event config type auto-generated
+ * when the performance counter is added.
+ */
+enum amdgpu_pmu_event_config_type {
+ AMDGPU_PMU_EVENT_CONFIG_TYPE_NONE = 0,
+ AMDGPU_PMU_EVENT_CONFIG_TYPE_DF,
+ AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI,
+ AMDGPU_PMU_EVENT_CONFIG_TYPE_MAX
+};
+
+#define AMDGPU_PMU_EVENT_CONFIG_TYPE_SHIFT 56
+#define AMDGPU_PMU_EVENT_CONFIG_TYPE_MASK 0xff
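A minimal worked example of the split described above (illustrative, mirroring what amdgpu_perf_add does for AMDGPU_PMU_PERF_TYPE_ALL): the raw config 0x0200000000024607 encodes event=0x7, instance=0x46, umask=0x2 and type=0x2, so shifting by 56 and masking with 0xff yields AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI. The helper name below is made up for the example.

/* sketch: extract the event config type from a 64-bit raw config */
static inline unsigned int amdgpu_pmu_raw_config_type(unsigned long long config)
{
	return (config >> AMDGPU_PMU_EVENT_CONFIG_TYPE_SHIFT) &
	       AMDGPU_PMU_EVENT_CONFIG_TYPE_MASK;
}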
+
+int amdgpu_pmu_init(struct amdgpu_device *adev);
+void amdgpu_pmu_fini(struct amdgpu_device *adev);
+
+#endif /* _AMDGPU_PMU_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
deleted file mode 100644
index f5ae871aa11c..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-#include "atom.h"
-#include "amdgpu.h"
-#include "amd_shared.h"
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include "amdgpu_pm.h"
-#include <drm/amdgpu_drm.h>
-#include "amdgpu_powerplay.h"
-#include "si_dpm.h"
-#include "cik_dpm.h"
-#include "vi_dpm.h"
-
-static int amdgpu_create_pp_handle(struct amdgpu_device *adev)
-{
- struct amd_pp_init pp_init;
- struct amd_powerplay *amd_pp;
- int ret;
-
- amd_pp = &(adev->powerplay);
- pp_init.chip_family = adev->family;
- pp_init.chip_id = adev->asic_type;
- pp_init.pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false;
- pp_init.feature_mask = amdgpu_pp_feature_mask;
- pp_init.device = amdgpu_cgs_create_device(adev);
- ret = amd_powerplay_create(&pp_init, &(amd_pp->pp_handle));
- if (ret)
- return -EINVAL;
- return 0;
-}
-
-static int amdgpu_pp_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amd_powerplay *amd_pp;
- int ret = 0;
-
- amd_pp = &(adev->powerplay);
- adev->pp_enabled = false;
- amd_pp->pp_handle = (void *)adev;
-
- switch (adev->asic_type) {
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- case CHIP_POLARIS12:
- case CHIP_TONGA:
- case CHIP_FIJI:
- case CHIP_TOPAZ:
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- case CHIP_VEGA10:
- adev->pp_enabled = true;
- if (amdgpu_create_pp_handle(adev))
- return -EINVAL;
- amd_pp->ip_funcs = &pp_ip_funcs;
- amd_pp->pp_funcs = &pp_dpm_funcs;
- break;
- /* These chips don't have powerplay implementations */
-#ifdef CONFIG_DRM_AMDGPU_SI
- case CHIP_TAHITI:
- case CHIP_PITCAIRN:
- case CHIP_VERDE:
- case CHIP_OLAND:
- case CHIP_HAINAN:
- amd_pp->ip_funcs = &si_dpm_ip_funcs;
- break;
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- amd_pp->ip_funcs = &ci_dpm_ip_funcs;
- break;
- case CHIP_KABINI:
- case CHIP_MULLINS:
- case CHIP_KAVERI:
- amd_pp->ip_funcs = &kv_dpm_ip_funcs;
- break;
-#endif
- default:
- ret = -EINVAL;
- break;
- }
-
- if (adev->powerplay.ip_funcs->early_init)
- ret = adev->powerplay.ip_funcs->early_init(
- adev->powerplay.pp_handle);
-
- if (ret == PP_DPM_DISABLED) {
- adev->pm.dpm_enabled = false;
- return 0;
- }
- return ret;
-}
-
-
-static int amdgpu_pp_late_init(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->late_init)
- ret = adev->powerplay.ip_funcs->late_init(
- adev->powerplay.pp_handle);
-
- if (adev->pp_enabled && adev->pm.dpm_enabled) {
- amdgpu_pm_sysfs_init(adev);
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
- }
-
- return ret;
-}
-
-static int amdgpu_pp_sw_init(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->sw_init)
- ret = adev->powerplay.ip_funcs->sw_init(
- adev->powerplay.pp_handle);
-
- return ret;
-}
-
-static int amdgpu_pp_sw_fini(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->sw_fini)
- ret = adev->powerplay.ip_funcs->sw_fini(
- adev->powerplay.pp_handle);
- if (ret)
- return ret;
-
- return ret;
-}
-
-static int amdgpu_pp_hw_init(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
- amdgpu_ucode_init_bo(adev);
-
- if (adev->powerplay.ip_funcs->hw_init)
- ret = adev->powerplay.ip_funcs->hw_init(
- adev->powerplay.pp_handle);
-
- if (ret == PP_DPM_DISABLED) {
- adev->pm.dpm_enabled = false;
- return 0;
- }
-
- if ((amdgpu_dpm != 0) && !amdgpu_sriov_vf(adev))
- adev->pm.dpm_enabled = true;
-
- return ret;
-}
-
-static int amdgpu_pp_hw_fini(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->hw_fini)
- ret = adev->powerplay.ip_funcs->hw_fini(
- adev->powerplay.pp_handle);
-
- if (adev->pp_enabled && adev->firmware.load_type == AMDGPU_FW_LOAD_SMU)
- amdgpu_ucode_fini_bo(adev);
-
- return ret;
-}
-
-static void amdgpu_pp_late_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->late_fini)
- adev->powerplay.ip_funcs->late_fini(
- adev->powerplay.pp_handle);
-
- if (adev->pp_enabled && adev->pm.dpm_enabled)
- amdgpu_pm_sysfs_fini(adev);
-
- amd_powerplay_destroy(adev->powerplay.pp_handle);
-}
-
-static int amdgpu_pp_suspend(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->suspend)
- ret = adev->powerplay.ip_funcs->suspend(
- adev->powerplay.pp_handle);
- return ret;
-}
-
-static int amdgpu_pp_resume(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->resume)
- ret = adev->powerplay.ip_funcs->resume(
- adev->powerplay.pp_handle);
- return ret;
-}
-
-static int amdgpu_pp_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->set_clockgating_state)
- ret = adev->powerplay.ip_funcs->set_clockgating_state(
- adev->powerplay.pp_handle, state);
- return ret;
-}
-
-static int amdgpu_pp_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->set_powergating_state)
- ret = adev->powerplay.ip_funcs->set_powergating_state(
- adev->powerplay.pp_handle, state);
- return ret;
-}
-
-
-static bool amdgpu_pp_is_idle(void *handle)
-{
- bool ret = true;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->is_idle)
- ret = adev->powerplay.ip_funcs->is_idle(
- adev->powerplay.pp_handle);
- return ret;
-}
-
-static int amdgpu_pp_wait_for_idle(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->wait_for_idle)
- ret = adev->powerplay.ip_funcs->wait_for_idle(
- adev->powerplay.pp_handle);
- return ret;
-}
-
-static int amdgpu_pp_soft_reset(void *handle)
-{
- int ret = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->powerplay.ip_funcs->soft_reset)
- ret = adev->powerplay.ip_funcs->soft_reset(
- adev->powerplay.pp_handle);
- return ret;
-}
-
-static const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
- .name = "amdgpu_powerplay",
- .early_init = amdgpu_pp_early_init,
- .late_init = amdgpu_pp_late_init,
- .sw_init = amdgpu_pp_sw_init,
- .sw_fini = amdgpu_pp_sw_fini,
- .hw_init = amdgpu_pp_hw_init,
- .hw_fini = amdgpu_pp_hw_fini,
- .late_fini = amdgpu_pp_late_fini,
- .suspend = amdgpu_pp_suspend,
- .resume = amdgpu_pp_resume,
- .is_idle = amdgpu_pp_is_idle,
- .wait_for_idle = amdgpu_pp_wait_for_idle,
- .soft_reset = amdgpu_pp_soft_reset,
- .set_clockgating_state = amdgpu_pp_set_clockgating_state,
- .set_powergating_state = amdgpu_pp_set_powergating_state,
-};
-
-const struct amdgpu_ip_block_version amdgpu_pp_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_SMC,
- .major = 1,
- .minor = 0,
- .rev = 0,
- .funcs = &amdgpu_pp_ip_funcs,
-};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
new file mode 100644
index 000000000000..34b5e22b44e5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2016-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König, Felix Kuehling
+ */
+
+#include "amdgpu.h"
+
+/**
+ * DOC: mem_info_preempt_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting the current total
+ * amount of used preemptible memory.
+ * The file mem_info_preempt_used is used for this and returns the currently
+ * used size of the preemptible block, in bytes.
+ */
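A hedged userspace example of consuming this attribute follows; the exact card index and sysfs path are assumptions, though on most systems the file shows up under the DRM card's device directory.

#include <stdio.h>

int main(void)
{
	/* path assumed for illustration; substitute the correct cardN */
	FILE *f = fopen("/sys/class/drm/card0/device/mem_info_preempt_used", "r");
	unsigned long long used;

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &used) == 1)
		printf("preemptible memory in use: %llu bytes\n", used);
	fclose(f);
	return 0;
}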
+static ssize_t mem_info_preempt_used_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct ttm_resource_manager *man = &adev->mman.preempt_mgr;
+
+ return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
+}
+
+static DEVICE_ATTR_RO(mem_info_preempt_used);
+
+/**
+ * amdgpu_preempt_mgr_new - allocate a new node
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @res: TTM memory object
+ *
+ * Dummy allocation: just count the space used, without allocating
+ * resources or enforcing any limit.
+ */
+static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *tbo,
+ const struct ttm_place *place,
+ struct ttm_resource **res)
+{
+ *res = kzalloc(sizeof(**res), GFP_KERNEL);
+ if (!*res)
+ return -ENOMEM;
+
+ ttm_resource_init(tbo, place, *res);
+ (*res)->start = AMDGPU_BO_INVALID_OFFSET;
+ return 0;
+}
+
+/**
+ * amdgpu_preempt_mgr_del - free ranges
+ *
+ * @man: TTM memory type manager
+ * @res: TTM memory object
+ *
+ * Free the resource allocated by amdgpu_preempt_mgr_new.
+ */
+static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+ ttm_resource_fini(man, res);
+ kfree(res);
+}
+
+static const struct ttm_resource_manager_func amdgpu_preempt_mgr_func = {
+ .alloc = amdgpu_preempt_mgr_new,
+ .free = amdgpu_preempt_mgr_del,
+};
+
+/**
+ * amdgpu_preempt_mgr_init - init the PREEMPT memory manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the preemptible memory manager and its sysfs interface.
+ */
+int amdgpu_preempt_mgr_init(struct amdgpu_device *adev)
+{
+ struct ttm_resource_manager *man = &adev->mman.preempt_mgr;
+ int ret;
+
+ man->use_tt = true;
+ man->func = &amdgpu_preempt_mgr_func;
+
+ ttm_resource_manager_init(man, &adev->mman.bdev, (1 << 30));
+
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_preempt_used);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_preempt_used\n");
+ return ret;
+ }
+
+ ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, man);
+ ttm_resource_manager_set_used(man, true);
+ return 0;
+}
+
+/**
+ * amdgpu_preempt_mgr_fini - free and destroy the PREEMPT memory manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Destroy and free the preemptible memory manager. If resources are still
+ * allocated, eviction is attempted first and the teardown is skipped when it
+ * fails.
+ */
+void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev)
+{
+ struct ttm_resource_manager *man = &adev->mman.preempt_mgr;
+ int ret;
+
+ ttm_resource_manager_set_used(man, false);
+
+ ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+ if (ret)
+ return;
+
+ if (adev->dev->kobj.sd)
+ device_remove_file(adev->dev, &dev_attr_mem_info_preempt_used);
+
+ ttm_resource_manager_cleanup(man);
+ ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, NULL);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
deleted file mode 100644
index 6bdc866570ab..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright 2012 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * based on nouveau_prime.c
- *
- * Authors: Alex Deucher
- */
-#include <drm/drmP.h>
-
-#include "amdgpu.h"
-#include <drm/amdgpu_drm.h>
-#include <linux/dma-buf.h>
-
-struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int npages = bo->tbo.num_pages;
-
- return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
-}
-
-void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj)
-{
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int ret;
-
- ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
- &bo->dma_buf_vmap);
- if (ret)
- return ERR_PTR(ret);
-
- return bo->dma_buf_vmap.virtual;
-}
-
-void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
-{
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-
- ttm_bo_kunmap(&bo->dma_buf_vmap);
-}
-
-struct drm_gem_object *
-amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg)
-{
- struct reservation_object *resv = attach->dmabuf->resv;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_bo *bo;
- int ret;
-
- ww_mutex_lock(&resv->lock, NULL);
- ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
- AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
- ww_mutex_unlock(&resv->lock);
- if (ret)
- return ERR_PTR(ret);
-
- bo->prime_shared_count = 1;
- return &bo->gem_base;
-}
-
-int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
-{
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- long ret = 0;
-
- ret = amdgpu_bo_reserve(bo, false);
- if (unlikely(ret != 0))
- return ret;
-
- /*
- * Wait for all shared fences to complete before we switch to future
- * use of exclusive fence on this prime shared bo.
- */
- ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
- if (unlikely(ret < 0)) {
- DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
- amdgpu_bo_unreserve(bo);
- return ret;
- }
-
- /* pin buffer into GTT */
- ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
- if (likely(ret == 0))
- bo->prime_shared_count++;
-
- amdgpu_bo_unreserve(bo);
- return ret;
-}
-
-void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
-{
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- int ret = 0;
-
- ret = amdgpu_bo_reserve(bo, true);
- if (unlikely(ret != 0))
- return;
-
- amdgpu_bo_unpin(bo);
- if (bo->prime_shared_count)
- bo->prime_shared_count--;
- amdgpu_bo_unreserve(bo);
-}
-
-struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
-{
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-
- return bo->tbo.resv;
-}
-
-struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *gobj,
- int flags)
-{
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
-
- if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
- return ERR_PTR(-EPERM);
-
- return drm_gem_prime_export(dev, gobj, flags);
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index ac5e92e5d59d..8c0e5d03de50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -24,42 +24,240 @@
*/
#include <linux/firmware.h>
-#include "drmP.h"
+#include <drm/drm_drv.h>
+
#include "amdgpu.h"
#include "amdgpu_psp.h"
#include "amdgpu_ucode.h"
+#include "amdgpu_xgmi.h"
#include "soc15_common.h"
#include "psp_v3_1.h"
+#include "psp_v10_0.h"
+#include "psp_v11_0.h"
+#include "psp_v11_0_8.h"
+#include "psp_v12_0.h"
+#include "psp_v13_0.h"
+#include "psp_v13_0_4.h"
+#include "psp_v14_0.h"
+
+#include "amdgpu_ras.h"
+#include "amdgpu_securedisplay.h"
+#include "amdgpu_atomfirmware.h"
-static void psp_set_funcs(struct amdgpu_device *adev);
+#define AMD_VBIOS_FILE_MAX_SIZE_B (1024*1024*16)
-static int psp_early_init(void *handle)
+static int psp_load_smu_fw(struct psp_context *psp);
+static int psp_rap_terminate(struct psp_context *psp);
+static int psp_securedisplay_terminate(struct psp_context *psp);
+
+static int psp_ring_init(struct psp_context *psp,
+ enum psp_ring_type ring_type)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int ret = 0;
+ struct psp_ring *ring;
+ struct amdgpu_device *adev = psp->adev;
- psp_set_funcs(adev);
+ ring = &psp->km_ring;
+
+ ring->ring_type = ring_type;
+
+ /* allocate a 4K page of local frame buffer memory for the ring */
+ ring->ring_size = 0x1000;
+ ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+ if (ret) {
+ ring->ring_size = 0;
+ return ret;
+ }
return 0;
}
-static int psp_sw_init(void *handle)
+/*
+ * Because DF C-state management is centralized in the PMFW, the firmware
+ * loading sequence is updated as follows:
+ * - Load KDB
+ * - Load SYS_DRV
+ * - Load tOS
+ * - Load PMFW
+ * - Setup TMR
+ * - Load other non-psp fw
+ * - Load ASD
+ * - Load XGMI/RAS/HDCP/DTM TA if any
+ *
+ * This new sequence is required for
+ * - Arcturus and onwards
+ */
+static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ psp->pmfw_centralized_cstate_management = false;
+ return;
+ }
+
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 7):
+ psp->pmfw_centralized_cstate_management = true;
+ break;
+ default:
+ psp->pmfw_centralized_cstate_management = false;
+ break;
+ }
+}
+
+static int psp_init_sriov_microcode(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ char ucode_prefix[30];
+ int ret = 0;
+
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 9):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
+ ret = psp_init_cap_microcode(psp, ucode_prefix);
+ break;
+ case IP_VERSION(13, 0, 2):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
+ ret = psp_init_cap_microcode(psp, ucode_prefix);
+ ret &= psp_init_ta_microcode(psp, ucode_prefix);
+ break;
+ case IP_VERSION(13, 0, 0):
+ adev->virt.autoload_ucode_id = 0;
+ break;
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ ret = psp_init_cap_microcode(psp, ucode_prefix);
+ ret &= psp_init_ta_microcode(psp, ucode_prefix);
+ break;
+ case IP_VERSION(13, 0, 10):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
+ ret = psp_init_cap_microcode(psp, ucode_prefix);
+ break;
+ case IP_VERSION(13, 0, 12):
+ ret = psp_init_ta_microcode(psp, ucode_prefix);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static int psp_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
- int ret;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- psp->init_microcode = psp_v3_1_init_microcode;
- psp->bootloader_load_sysdrv = psp_v3_1_bootloader_load_sysdrv;
- psp->bootloader_load_sos = psp_v3_1_bootloader_load_sos;
- psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
- psp->ring_init = psp_v3_1_ring_init;
- psp->ring_create = psp_v3_1_ring_create;
- psp->ring_destroy = psp_v3_1_ring_destroy;
- psp->cmd_submit = psp_v3_1_cmd_submit;
- psp->compare_sram_data = psp_v3_1_compare_sram_data;
- psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
+ psp->autoload_supported = true;
+ psp->boot_time_tmr = true;
+
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ psp_v3_1_set_psp_funcs(psp);
+ psp->autoload_supported = false;
+ psp->boot_time_tmr = false;
+ break;
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ psp_v10_0_set_psp_funcs(psp);
+ psp->autoload_supported = false;
+ psp->boot_time_tmr = false;
+ break;
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 4):
+ psp_v11_0_set_psp_funcs(psp);
+ psp->autoload_supported = false;
+ psp->boot_time_tmr = false;
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 7):
+ adev->psp.sup_pd_fw_up = !amdgpu_sriov_vf(adev);
+ fallthrough;
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ psp_v11_0_set_psp_funcs(psp);
+ psp->boot_time_tmr = false;
+ break;
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 1):
+ psp_v12_0_set_psp_funcs(psp);
+ psp->autoload_supported = false;
+ psp->boot_time_tmr = false;
+ break;
+ case IP_VERSION(13, 0, 2):
+ psp->boot_time_tmr = false;
+ fallthrough;
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 14):
+ psp_v13_0_set_psp_funcs(psp);
+ psp->autoload_supported = false;
+ break;
+ case IP_VERSION(13, 0, 12):
+ psp_v13_0_set_psp_funcs(psp);
+ psp->autoload_supported = false;
+ adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
+ break;
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 8):
+ case IP_VERSION(13, 0, 11):
+ case IP_VERSION(14, 0, 0):
+ case IP_VERSION(14, 0, 1):
+ case IP_VERSION(14, 0, 4):
+ psp_v13_0_set_psp_funcs(psp);
+ psp->boot_time_tmr = false;
+ break;
+ case IP_VERSION(11, 0, 8):
+ if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
+ psp_v11_0_8_set_psp_funcs(psp);
+ }
+ psp->autoload_supported = false;
+ psp->boot_time_tmr = false;
+ break;
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 7):
+ case IP_VERSION(13, 0, 10):
+ psp_v13_0_set_psp_funcs(psp);
+ adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
+ psp->boot_time_tmr = false;
+ break;
+ case IP_VERSION(13, 0, 4):
+ psp_v13_0_4_set_psp_funcs(psp);
+ psp->boot_time_tmr = false;
+ break;
+ case IP_VERSION(14, 0, 2):
+ case IP_VERSION(14, 0, 3):
+ adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev);
+ psp_v14_0_set_psp_funcs(psp);
+ break;
+ case IP_VERSION(14, 0, 5):
+ psp_v14_0_set_psp_funcs(psp);
+ psp->boot_time_tmr = false;
break;
default:
return -EINVAL;
@@ -67,30 +265,330 @@ static int psp_sw_init(void *handle)
psp->adev = adev;
- ret = psp_init_microcode(psp);
- if (ret) {
- DRM_ERROR("Failed to load psp firmware!\n");
- return ret;
+ adev->psp_timeout = 20000;
+
+ psp_check_pmfw_centralized_cstate_management(psp);
+
+ if (amdgpu_sriov_vf(adev))
+ return psp_init_sriov_microcode(psp);
+ else
+ return psp_init_microcode(psp);
+}
+
+void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
+{
+ amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
+ &mem_ctx->shared_buf);
+ mem_ctx->shared_bo = NULL;
+}
+
+static void psp_free_shared_bufs(struct psp_context *psp)
+{
+ void *tmr_buf;
+ void **pptr;
+
+ /* free TMR memory buffer */
+ pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
+ amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+ psp->tmr_bo = NULL;
+
+ /* free xgmi shared memory */
+ psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
+
+ /* free ras shared memory */
+ psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
+
+ /* free hdcp shared memory */
+ psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
+
+ /* free dtm shared memory */
+ psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
+
+ /* free rap shared memory */
+ psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
+
+ /* free securedisplay shared memory */
+ psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
+
+}
+
+static void psp_memory_training_fini(struct psp_context *psp)
+{
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+
+ ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
+ kfree(ctx->sys_cache);
+ ctx->sys_cache = NULL;
+}
+
+static int psp_memory_training_init(struct psp_context *psp)
+{
+ int ret;
+ struct psp_memory_training_context *ctx = &psp->mem_train_ctx;
+
+ if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) {
+ dev_dbg(psp->adev->dev, "memory training is not supported!\n");
+ return 0;
}
+ ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL);
+ if (ctx->sys_cache == NULL) {
+ dev_err(psp->adev->dev, "alloc mem_train_ctx.sys_cache failed!\n");
+ ret = -ENOMEM;
+ goto Err_out;
+ }
+
+ dev_dbg(psp->adev->dev,
+ "train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
+ ctx->train_data_size,
+ ctx->p2c_train_data_offset,
+ ctx->c2p_train_data_offset);
+ ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS;
return 0;
+
+Err_out:
+ psp_memory_training_fini(psp);
+ return ret;
}
-static int psp_sw_fini(void *handle)
+/*
+ * Helper function to query a psp runtime database entry
+ *
+ * @adev: amdgpu_device pointer
+ * @entry_type: the type of psp runtime database entry
+ * @db_entry: runtime database entry pointer
+ *
+ * Return false if the runtime database doesn't exist or the entry is invalid,
+ * or true if the specific database entry is found and copied to @db_entry
+ */
+static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
+ enum psp_runtime_entry_type entry_type,
+ void *db_entry)
{
+ uint64_t db_header_pos, db_dir_pos;
+ struct psp_runtime_data_header db_header = {0};
+ struct psp_runtime_data_directory db_dir = {0};
+ bool ret = false;
+ int i;
+
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14))
+ return false;
+
+ db_header_pos = adev->gmc.mc_vram_size - PSP_RUNTIME_DB_OFFSET;
+ db_dir_pos = db_header_pos + sizeof(struct psp_runtime_data_header);
+
+ /* read runtime db header from vram */
+ amdgpu_device_vram_access(adev, db_header_pos, (uint32_t *)&db_header,
+ sizeof(struct psp_runtime_data_header), false);
+
+ if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
+ /* runtime db doesn't exist, exit */
+ dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
+ return false;
+ }
+
+ /* read runtime database entry from vram */
+ amdgpu_device_vram_access(adev, db_dir_pos, (uint32_t *)&db_dir,
+ sizeof(struct psp_runtime_data_directory), false);
+
+ if (db_dir.entry_count >= PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT) {
+ /* invalid db entry count, exit */
+ dev_warn(adev->dev, "Invalid PSP runtime database entry count\n");
+ return false;
+ }
+
+ /* look up for requested entry type */
+ for (i = 0; i < db_dir.entry_count && !ret; i++) {
+ if (db_dir.entry_list[i].entry_type == entry_type) {
+ switch (entry_type) {
+ case PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG:
+ if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_boot_cfg_entry)) {
+ /* invalid db entry size */
+ dev_warn(adev->dev, "Invalid PSP runtime database boot cfg entry size\n");
+ return false;
+ }
+ /* read runtime database entry */
+ amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
+ (uint32_t *)db_entry, sizeof(struct psp_runtime_boot_cfg_entry), false);
+ ret = true;
+ break;
+ case PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS:
+ if (db_dir.entry_list[i].size < sizeof(struct psp_runtime_scpm_entry)) {
+ /* invalid db entry size */
+ dev_warn(adev->dev, "Invalid PSP runtime database scpm entry size\n");
+ return false;
+ }
+ /* read runtime database entry */
+ amdgpu_device_vram_access(adev, db_header_pos + db_dir.entry_list[i].offset,
+ (uint32_t *)db_entry, sizeof(struct psp_runtime_scpm_entry), false);
+ ret = true;
+ break;
+ default:
+ ret = false;
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int psp_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct psp_context *psp = &adev->psp;
+ int ret;
+ struct psp_runtime_boot_cfg_entry boot_cfg_entry;
+ struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
+ struct psp_runtime_scpm_entry scpm_entry;
+
+ psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!psp->cmd) {
+ dev_err(adev->dev, "Failed to allocate memory to command buffer!\n");
+ return -ENOMEM;
+ }
+
+ adev->psp.xgmi_context.supports_extended_data =
+ !adev->gmc.xgmi.connected_to_cpu &&
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2);
+
+ memset(&scpm_entry, 0, sizeof(scpm_entry));
+ if ((psp_get_runtime_db_entry(adev,
+ PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS,
+ &scpm_entry)) &&
+ (scpm_entry.scpm_status != SCPM_DISABLE)) {
+ adev->scpm_enabled = true;
+ adev->scpm_status = scpm_entry.scpm_status;
+ } else {
+ adev->scpm_enabled = false;
+ adev->scpm_status = SCPM_DISABLE;
+ }
+
+ /* TODO: stop gpu driver services and print alarm if scpm is enabled with error status */
+
+ memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry));
+ if (psp_get_runtime_db_entry(adev,
+ PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG,
+ &boot_cfg_entry)) {
+ psp->boot_cfg_bitmask = boot_cfg_entry.boot_cfg_bitmask;
+ if ((psp->boot_cfg_bitmask) &
+ BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING) {
+ /* If psp runtime database exists, then
+ * only enable two stage memory training
+ * when TWO_STAGE_DRAM_TRAINING bit is set
+ * in runtime database
+ */
+ mem_training_ctx->enable_mem_training = true;
+ }
+
+ } else {
+ /* If psp runtime database doesn't exist or is
+ * invalid, force enable two stage memory training
+ */
+ mem_training_ctx->enable_mem_training = true;
+ }
+
+ if (mem_training_ctx->enable_mem_training) {
+ ret = psp_memory_training_init(psp);
+ if (ret) {
+ dev_err(adev->dev, "Failed to initialize memory training!\n");
+ return ret;
+ }
+
+ ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT);
+ if (ret) {
+ dev_err(adev->dev, "Failed to process memory training!\n");
+ return ret;
+ }
+ }
+
+ ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
+ (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ &psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr,
+ &psp->fw_pri_buf);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &psp->fence_buf_bo,
+ &psp->fence_buf_mc_addr,
+ &psp->fence_buf);
+ if (ret)
+ goto failed1;
+
+ ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
+ (void **)&psp->cmd_buf_mem);
+ if (ret)
+ goto failed2;
+
+ return 0;
+
+failed2:
+ amdgpu_bo_free_kernel(&psp->fence_buf_bo,
+ &psp->fence_buf_mc_addr, &psp->fence_buf);
+failed1:
+ amdgpu_bo_free_kernel(&psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
+ return ret;
+}
+
+static int psp_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct psp_context *psp = &adev->psp;
+
+ psp_memory_training_fini(psp);
+
+ amdgpu_ucode_release(&psp->sos_fw);
+ amdgpu_ucode_release(&psp->asd_fw);
+ amdgpu_ucode_release(&psp->ta_fw);
+ amdgpu_ucode_release(&psp->cap_fw);
+ amdgpu_ucode_release(&psp->toc_fw);
+
+ kfree(psp->cmd);
+ psp->cmd = NULL;
+
+ psp_free_shared_bufs(psp);
+
+ if (psp->km_ring.ring_mem)
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &psp->km_ring.ring_mem_mc_addr,
+ (void **)&psp->km_ring.ring_mem);
+
+ amdgpu_bo_free_kernel(&psp->fw_pri_bo,
+ &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
+ amdgpu_bo_free_kernel(&psp->fence_buf_bo,
+ &psp->fence_buf_mc_addr, &psp->fence_buf);
+ amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
+ (void **)&psp->cmd_buf_mem);
+
return 0;
}
-int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t reg_val, uint32_t mask, bool check_changed)
+int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val,
+ uint32_t mask, uint32_t flags)
{
+ bool check_changed = flags & PSP_WAITREG_CHANGED;
+ bool verbose = !(flags & PSP_WAITREG_NOVERBOSE);
uint32_t val;
int i;
struct amdgpu_device *adev = psp->adev;
- val = RREG32(reg_index);
+ if (psp->adev->no_hw_access)
+ return 0;
for (i = 0; i < adev->usec_timeout; i++) {
+ val = RREG32(reg_index);
if (check_changed) {
if (val != reg_val)
return 0;
@@ -101,215 +599,2496 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
udelay(1);
}
+ if (verbose)
+ dev_err(adev->dev,
+ "psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x",
+ reg_index, mask, val, reg_val);
+
return -ETIME;
}
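A minimal usage sketch of the flag-based psp_wait_for() interface; the register index and status bit below are illustrative placeholders, not values from the patch:

static int example_wait_status_bit(struct psp_context *psp)
{
	/* Poll until bit 31 of a hypothetical status register is set,
	 * suppressing the timeout message via PSP_WAITREG_NOVERBOSE. */
	return psp_wait_for(psp, 0x80 /* hypothetical reg index */,
			    0x80000000, 0x80000000,
			    PSP_WAITREG_NOVERBOSE);
}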
+int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
+ uint32_t reg_val, uint32_t mask, uint32_t msec_timeout)
+{
+ uint32_t val;
+ int i;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp->adev->no_hw_access)
+ return 0;
+
+ for (i = 0; i < msec_timeout; i++) {
+ val = RREG32(reg_index);
+ if ((val & mask) == reg_val)
+ return 0;
+ msleep(1);
+ }
+
+ return -ETIME;
+}
+
+static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id)
+{
+ switch (cmd_id) {
+ case GFX_CMD_ID_LOAD_TA:
+ return "LOAD_TA";
+ case GFX_CMD_ID_UNLOAD_TA:
+ return "UNLOAD_TA";
+ case GFX_CMD_ID_INVOKE_CMD:
+ return "INVOKE_CMD";
+ case GFX_CMD_ID_LOAD_ASD:
+ return "LOAD_ASD";
+ case GFX_CMD_ID_SETUP_TMR:
+ return "SETUP_TMR";
+ case GFX_CMD_ID_LOAD_IP_FW:
+ return "LOAD_IP_FW";
+ case GFX_CMD_ID_DESTROY_TMR:
+ return "DESTROY_TMR";
+ case GFX_CMD_ID_SAVE_RESTORE:
+ return "SAVE_RESTORE_IP_FW";
+ case GFX_CMD_ID_SETUP_VMR:
+ return "SETUP_VMR";
+ case GFX_CMD_ID_DESTROY_VMR:
+ return "DESTROY_VMR";
+ case GFX_CMD_ID_PROG_REG:
+ return "PROG_REG";
+ case GFX_CMD_ID_GET_FW_ATTESTATION:
+ return "GET_FW_ATTESTATION";
+ case GFX_CMD_ID_LOAD_TOC:
+ return "ID_LOAD_TOC";
+ case GFX_CMD_ID_AUTOLOAD_RLC:
+ return "AUTOLOAD_RLC";
+ case GFX_CMD_ID_BOOT_CFG:
+ return "BOOT_CFG";
+ case GFX_CMD_ID_CONFIG_SQ_PERFMON:
+ return "CONFIG_SQ_PERFMON";
+ case GFX_CMD_ID_FB_FW_RESERV_ADDR:
+ return "FB_FW_RESERV_ADDR";
+ case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR:
+ return "FB_FW_RESERV_EXT_ADDR";
+ case GFX_CMD_ID_SRIOV_SPATIAL_PART:
+ return "SPATIAL_PARTITION";
+ case GFX_CMD_ID_FB_NPS_MODE:
+ return "NPS_MODE_CHANGE";
+ default:
+ return "UNKNOWN CMD";
+ }
+}
+
+static bool psp_err_warn(struct psp_context *psp)
+{
+ struct psp_gfx_cmd_resp *cmd = psp->cmd_buf_mem;
+
+ /* This response indicates reg list is already loaded */
+ if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
+ cmd->cmd_id == GFX_CMD_ID_LOAD_IP_FW &&
+ cmd->cmd.cmd_load_ip_fw.fw_type == GFX_FW_TYPE_REG_LIST &&
+ cmd->resp.status == TEE_ERROR_CANCEL)
+ return false;
+
+ return true;
+}
+
static int
psp_cmd_submit_buf(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
- struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr,
- int index)
+ struct psp_gfx_cmd_resp *cmd, uint64_t fence_mc_addr)
{
int ret;
- struct amdgpu_bo *cmd_buf_bo;
- uint64_t cmd_buf_mc_addr;
- struct psp_gfx_cmd_resp *cmd_buf_mem;
- struct amdgpu_device *adev = psp->adev;
+ int index;
+ int timeout = psp->adev->psp_timeout;
+ bool ras_intr = false;
+ bool skip_unsupport = false;
- ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &cmd_buf_bo, &cmd_buf_mc_addr,
- (void **)&cmd_buf_mem);
- if (ret)
- return ret;
+ if (psp->adev->no_hw_access)
+ return 0;
- memset(cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
+ memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
- memcpy(cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
+ memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
- ret = psp_cmd_submit(psp, ucode, cmd_buf_mc_addr,
- fence_mc_addr, index);
+ index = atomic_inc_return(&psp->fence_value);
+ ret = psp_ring_cmd_submit(psp, psp->cmd_buf_mc_addr, fence_mc_addr, index);
+ if (ret) {
+ atomic_dec(&psp->fence_value);
+ goto exit;
+ }
+ amdgpu_device_invalidate_hdp(psp->adev, NULL);
while (*((unsigned int *)psp->fence_buf) != index) {
- msleep(1);
+ if (--timeout == 0)
+ break;
+ /*
+ * Don't wait for the timeout when err_event_athub occurs, because
+ * the gpu reset thread has been triggered and the locked resources
+ * need to be released for the psp resume sequence.
+ */
+ ras_intr = amdgpu_ras_intr_triggered();
+ if (ras_intr)
+ break;
+ usleep_range(10, 100);
+ amdgpu_device_invalidate_hdp(psp->adev, NULL);
+ }
+
+ /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command and PSP_ERR_UNKNOWN_COMMAND in SRIOV */
+ skip_unsupport = (psp->cmd_buf_mem->resp.status == TEE_ERROR_NOT_SUPPORTED ||
+ psp->cmd_buf_mem->resp.status == PSP_ERR_UNKNOWN_COMMAND) && amdgpu_sriov_vf(psp->adev);
+
+ memcpy(&cmd->resp, &psp->cmd_buf_mem->resp, sizeof(struct psp_gfx_resp));
+
+ /* In some cases the psp response status is not 0 even though the
+ * command was submitted without a problem; some versions of the PSP FW
+ * don't write 0 back to that field.
+ * So only print a warning instead of an error here, to avoid breaking
+ * hw_init during psp initialization, and don't return -EINVAL for it.
+ */
+ if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
+ if (ucode)
+ dev_warn(psp->adev->dev,
+ "failed to load ucode %s(0x%X) ",
+ amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
+ if (psp_err_warn(psp))
+ dev_warn(
+ psp->adev->dev,
+ "psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
+ psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
+ psp->cmd_buf_mem->cmd_id,
+ psp->cmd_buf_mem->resp.status);
+ /* If any firmware (including CAP) load fails under SRIOV, it should
+ * return failure to stop the VF from initializing.
+ * Also return failure in case of timeout
+ */
+ if ((ucode && amdgpu_sriov_vf(psp->adev)) || !timeout) {
+ ret = -EINVAL;
+ goto exit;
+ }
}
- amdgpu_bo_free_kernel(&cmd_buf_bo,
- &cmd_buf_mc_addr,
- (void **)&cmd_buf_mem);
+ if (ucode) {
+ ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
+ ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
+ }
+exit:
return ret;
}
-static void psp_prep_tmr_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t tmr_mc, uint32_t size)
+static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
+{
+ struct psp_gfx_cmd_resp *cmd = psp->cmd;
+
+ mutex_lock(&psp->mutex);
+
+ memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
+
+ return cmd;
+}
+
+static void release_psp_cmd_buf(struct psp_context *psp)
{
- cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
- cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = (uint32_t)tmr_mc;
- cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = (uint32_t)(tmr_mc >> 32);
+ mutex_unlock(&psp->mutex);
+}
+
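The acquire/release pair above serializes all use of the single preallocated command buffer; a hedged sketch of the submit pattern the helpers below all follow (the command id here is just a placeholder):

static int example_submit_one_cmd(struct psp_context *psp)
{
	struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
	int ret;

	cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;	/* placeholder command */
	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);

	release_psp_cmd_buf(psp);
	return ret;
}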
+static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
+ struct psp_gfx_cmd_resp *cmd,
+ uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t size = 0;
+ uint64_t tmr_pa = 0;
+
+ if (tmr_bo) {
+ size = amdgpu_bo_size(tmr_bo);
+ tmr_pa = amdgpu_gmc_vram_pa(adev, tmr_bo);
+ }
+
+ if (amdgpu_sriov_vf(psp->adev))
+ cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
+ else
+ cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
+ cmd->cmd.cmd_setup_tmr.buf_phy_addr_lo = lower_32_bits(tmr_mc);
+ cmd->cmd.cmd_setup_tmr.buf_phy_addr_hi = upper_32_bits(tmr_mc);
cmd->cmd.cmd_setup_tmr.buf_size = size;
+ cmd->cmd.cmd_setup_tmr.bitfield.virt_phy_addr = 1;
+ cmd->cmd.cmd_setup_tmr.system_phy_addr_lo = lower_32_bits(tmr_pa);
+ cmd->cmd.cmd_setup_tmr.system_phy_addr_hi = upper_32_bits(tmr_pa);
+}
+
+static void psp_prep_load_toc_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t pri_buf_mc, uint32_t size)
+{
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TOC;
+ cmd->cmd.cmd_load_toc.toc_phy_addr_lo = lower_32_bits(pri_buf_mc);
+ cmd->cmd.cmd_load_toc.toc_phy_addr_hi = upper_32_bits(pri_buf_mc);
+ cmd->cmd.cmd_load_toc.toc_size = size;
+}
+
+/* Issue LOAD TOC cmd to PSP to part toc and calculate tmr size needed */
+static int psp_load_toc(struct psp_context *psp,
+ uint32_t *tmr_size)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
+
+ /* Copy toc to psp firmware private buffer */
+ psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
+
+ psp_prep_load_toc_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->toc.size_bytes);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+ if (!ret)
+ *tmr_size = psp->cmd_buf_mem->resp.tmr_size;
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
}
/* Set up Trusted Memory Region */
static int psp_tmr_init(struct psp_context *psp)
{
- int ret;
+ int ret = 0;
+ int tmr_size;
+ void *tmr_buf;
+ void **pptr;
/*
- * Allocate 3M memory aligned to 1M from Frame Buffer (local
- * physical).
+ * According to the HW engineers, the TMR address should be "naturally
+ * aligned", i.e. the start address should be an integer multiple of the TMR size.
*
* Note: this memory need be reserved till the driver
* uninitializes.
*/
- ret = amdgpu_bo_create_kernel(psp->adev, 0x300000, 0x100000,
- AMDGPU_GEM_DOMAIN_VRAM,
- &psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
+ tmr_size = PSP_TMR_SIZE(psp->adev);
+
+ /* For ASICs that support RLC autoload, the psp will parse the toc
+ * and calculate the total size of TMR needed
+ */
+ if (!amdgpu_sriov_vf(psp->adev) &&
+ psp->toc.start_addr &&
+ psp->toc.size_bytes &&
+ psp->fw_pri_buf) {
+ ret = psp_load_toc(psp, &tmr_size);
+ if (ret) {
+ dev_err(psp->adev->dev, "Failed to load toc\n");
+ return ret;
+ }
+ }
+
+ if (!psp->tmr_bo && !psp->boot_time_tmr) {
+ pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
+ ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
+ PSP_TMR_ALIGNMENT,
+ AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->tmr_bo, &psp->tmr_mc_addr,
+ pptr);
+ }
+ if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo)
+ psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo);
return ret;
}
+static bool psp_skip_tmr(struct psp_context *psp)
+{
+ switch (amdgpu_ip_version(psp->adev, MP0_HWIP, 0)) {
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ return true;
+ default:
+ return false;
+ }
+}
+
static int psp_tmr_load(struct psp_context *psp)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
+ /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR.
+ * Already set up by host driver.
+ */
+ if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
+ return 0;
+
+ cmd = acquire_psp_cmd_buf(psp);
- psp_prep_tmr_cmd_buf(cmd, psp->tmr_mc_addr, 0x300000);
+ psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
+ if (psp->tmr_bo)
+ dev_info(psp->adev->dev, "reserve 0x%lx from 0x%llx for PSP TMR\n",
+ amdgpu_bo_size(psp->tmr_bo), psp->tmr_mc_addr);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr, 1);
+ psp->fence_buf_mc_addr);
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
+
+static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
+ struct psp_gfx_cmd_resp *cmd)
+{
+ if (amdgpu_sriov_vf(psp->adev))
+ cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
+ else
+ cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
+}
+
+static int psp_tmr_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /* skip TMR unload for Navi12 and CHIP_SIENNA_CICHLID SRIOV,
+ * as TMR is not loaded at all
+ */
+ if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
+ return 0;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ psp_prep_tmr_unload_cmd_buf(psp, cmd);
+ dev_dbg(psp->adev->dev, "free PSP TMR buffer\n");
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
+
+static int psp_tmr_terminate(struct psp_context *psp)
+{
+ return psp_tmr_unload(psp);
+}
+
+int psp_get_fw_attestation_records_addr(struct psp_context *psp,
+ uint64_t *output_ptr)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ if (!output_ptr)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ *output_ptr = ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_lo) +
+ ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
+ }
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
+
+static int psp_get_fw_reservation_info(struct psp_context *psp,
+ uint32_t cmd_id,
+ uint64_t *addr,
+ uint32_t *size)
+{
+ int ret;
+ uint32_t status;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = cmd_id;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+ if (ret) {
+ release_psp_cmd_buf(psp);
+ return ret;
+ }
+
+ status = cmd->resp.status;
+ if (status == PSP_ERR_UNKNOWN_COMMAND) {
+ release_psp_cmd_buf(psp);
+ *addr = 0;
+ *size = 0;
+ return 0;
+ }
+
+ *addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 |
+ cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo;
+ *size = cmd->resp.uresp.fw_reserve_info.reserve_size;
+
+ release_psp_cmd_buf(psp);
+
+ return 0;
+}
+
+int psp_update_fw_reservation(struct psp_context *psp)
+{
+ int ret;
+ uint64_t reserv_addr, reserv_addr_ext;
+ uint32_t reserv_size, reserv_size_ext, mp0_ip_ver;
+ struct amdgpu_device *adev = psp->adev;
+
+ mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0);
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ switch (mp0_ip_ver) {
+ case IP_VERSION(14, 0, 2):
+ if (adev->psp.sos.fw_version < 0x3b0e0d)
+ return 0;
+ break;
+
+ case IP_VERSION(14, 0, 3):
+ if (adev->psp.sos.fw_version < 0x3a0e14)
+ return 0;
+ break;
+
+ default:
+ return 0;
+ }
+
+ ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size);
if (ret)
- goto failed;
+ return ret;
+ ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext);
+ if (ret)
+ return ret;
+
+ if (reserv_addr != adev->gmc.real_vram_size - reserv_size) {
+ dev_warn(adev->dev, "reserve fw region is not valid!\n");
+ return 0;
+ }
+
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
- kfree(cmd);
+ reserv_size = roundup(reserv_size, SZ_1M);
+
+ ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL);
+ if (ret) {
+ dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL);
+ return ret;
+ }
+
+ reserv_size_ext = roundup(reserv_size_ext, SZ_1M);
+
+ ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext,
+ &adev->mman.fw_reserved_memory_extend, NULL);
+ if (ret) {
+ dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL);
+ return ret;
+ }
return 0;
+}
+
+static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
+{
+ struct psp_context *psp = &adev->psp;
+ struct psp_gfx_cmd_resp *cmd;
+ int ret;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
+ cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+ if (!ret) {
+ *boot_cfg =
+ (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
+ }
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
+
+static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
+{
+ int ret;
+ struct psp_context *psp = &adev->psp;
+ struct psp_gfx_cmd_resp *cmd;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
+ cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
+ cmd->cmd.boot_cfg.boot_config = boot_cfg;
+ cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ release_psp_cmd_buf(psp);
-failed:
- kfree(cmd);
return ret;
}
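A hedged sketch of how the boot-config helpers above can be combined to check whether GECC is enabled; the wrapper name is illustrative and error handling is trimmed:

static bool example_gecc_enabled(struct amdgpu_device *adev)
{
	uint32_t boot_cfg = 0;

	/* psp_boot_config_get() reports 1 when the GECC bit is set */
	if (psp_boot_config_get(adev, &boot_cfg))
		return false;	/* treat a failed query as disabled */

	return boot_cfg == 1;
}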
-static void psp_prep_asd_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t asd_mc, uint64_t asd_mc_shared,
- uint32_t size, uint32_t shared_size)
+static int psp_rl_load(struct amdgpu_device *adev)
{
- cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
- cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
- cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
- cmd->cmd.cmd_load_ta.app_len = size;
+ int ret;
+ struct psp_context *psp = &adev->psp;
+ struct psp_gfx_cmd_resp *cmd;
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(asd_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(asd_mc_shared);
- cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+ if (!is_psp_fw_valid(psp->rl))
+ return 0;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
+
+ cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
+ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
+ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
+ cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
+ cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
}
-static int psp_asd_init(struct psp_context *psp)
+int psp_memory_partition(struct psp_context *psp, int mode)
{
+ struct psp_gfx_cmd_resp *cmd;
int ret;
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = GFX_CMD_ID_FB_NPS_MODE;
+ cmd->cmd.cmd_memory_part.mode = mode;
+
+ dev_info(psp->adev->dev,
+ "Requesting %d memory partition change through PSP", mode);
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "PSP request failed to change to NPS%d mode\n", mode);
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
+
+int psp_spatial_partition(struct psp_context *psp, int mode)
+{
+ struct psp_gfx_cmd_resp *cmd;
+ int ret;
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = GFX_CMD_ID_SRIOV_SPATIAL_PART;
+ cmd->cmd.cmd_spatial_part.mode = mode;
+
+ dev_info(psp->adev->dev, "Requesting %d partitions through PSP", mode);
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
+
+static int psp_asd_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ /* If the PSP version doesn't match the ASD version, ASD loading will fail.
+ * As a workaround, bypass it for sriov for now.
+ * TODO: add a version check to make it common
+ */
+ if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
+ return 0;
+
+ /* bypass asd if display hardware is not available */
+ if (!amdgpu_device_has_display_hardware(psp->adev) &&
+ amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >= IP_VERSION(13, 0, 10))
+ return 0;
+
+ psp->asd_context.mem_context.shared_mc_addr = 0;
+ psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
+ psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
+
+ ret = psp_ta_load(psp, &psp->asd_context);
+ if (!ret)
+ psp->asd_context.initialized = true;
+
+ return ret;
+}
+
+static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+ cmd->cmd.cmd_unload_ta.session_id = session_id;
+}
+
+int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
+
+ psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ context->resp_status = cmd->resp.status;
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
+
+static int psp_asd_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->asd_context.initialized)
+ return 0;
+
+ ret = psp_ta_unload(psp, &psp->asd_context);
+ if (!ret)
+ psp->asd_context.initialized = false;
+
+ return ret;
+}
+
+static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t id, uint32_t value)
+{
+ cmd->cmd_id = GFX_CMD_ID_PROG_REG;
+ cmd->cmd.cmd_setup_reg_prog.reg_value = value;
+ cmd->cmd.cmd_setup_reg_prog.reg_id = id;
+}
+
+int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
+ uint32_t value)
+{
+ struct psp_gfx_cmd_resp *cmd;
+ int ret = 0;
+
+ if (reg >= PSP_REG_LAST)
+ return -EINVAL;
+
+ cmd = acquire_psp_cmd_buf(psp);
+
+ psp_prep_reg_prog_cmd_buf(cmd, reg, value);
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+ if (ret)
+ dev_err(psp->adev->dev, "PSP failed to program reg id %d\n", reg);
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
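A short usage sketch for psp_reg_program(); the register id is assumed to be one of the psp_reg_prog_id values (PSP_REG_IH_RB_CNTL is used here as an assumption) and the value is a placeholder:

static int example_program_protected_reg(struct psp_context *psp, uint32_t val)
{
	/* Ask the PSP to program a protected register on the driver's behalf */
	return psp_reg_program(psp, PSP_REG_IH_RB_CNTL /* assumed id */, val);
}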
+
+static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t ta_bin_mc,
+ struct ta_context *context)
+{
+ cmd->cmd_id = context->ta_load_type;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
+ cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
+ lower_32_bits(context->mem_context.shared_mc_addr);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
+ upper_32_bits(context->mem_context.shared_mc_addr);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
+}
+
+int psp_ta_init_shared_buf(struct psp_context *psp,
+ struct ta_mem_context *mem_ctx)
+{
/*
* Allocate 16k memory aligned to 4k from Frame Buffer (local
- * physical) for shared ASD <-> Driver
+ * physical) for TA <-> host shared memory
*/
- ret = amdgpu_bo_create_kernel(psp->adev, PSP_ASD_SHARED_MEM_SIZE,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &psp->asd_shared_bo,
- &psp->asd_shared_mc_addr,
- &psp->asd_shared_buf);
+ return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &mem_ctx->shared_bo,
+ &mem_ctx->shared_mc_addr,
+ &mem_ctx->shared_buf);
+}
+
+static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+}
+
+int psp_ta_invoke(struct psp_context *psp,
+ uint32_t ta_cmd_id,
+ struct ta_context *context)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
+
+ psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ context->resp_status = cmd->resp.status;
+
+ release_psp_cmd_buf(psp);
return ret;
}
-static int psp_asd_load(struct psp_context *psp)
+int psp_ta_load(struct psp_context *psp, struct ta_context *context)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
+ cmd = acquire_psp_cmd_buf(psp);
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
- memcpy(psp->fw_pri_buf, psp->asd_start_addr, psp->asd_ucode_size);
+ psp_copy_fw(psp, context->bin_desc.start_addr,
+ context->bin_desc.size_bytes);
- psp_prep_asd_cmd_buf(cmd, psp->fw_pri_mc_addr, psp->asd_shared_mc_addr,
- psp->asd_ucode_size, PSP_ASD_SHARED_MEM_SIZE);
+ if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) &&
+ context->mem_context.shared_bo)
+ context->mem_context.shared_mc_addr =
+ amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo);
+
+ psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr, 2);
+ psp->fence_buf_mc_addr);
+
+ context->resp_status = cmd->resp.status;
+
+ if (!ret)
+ context->session_id = cmd->resp.session_id;
- kfree(cmd);
+ release_psp_cmd_buf(psp);
return ret;
}
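Taken together, the TA helpers above implement a generic load/invoke/unload lifecycle; a hedged sketch of that round trip for an arbitrary ta_context (the shared-memory size and command id are illustrative):

static int example_ta_roundtrip(struct psp_context *psp,
				struct ta_context *ctx, uint32_t ta_cmd_id)
{
	int ret;

	ctx->mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;	/* illustrative size */
	ctx->ta_load_type = GFX_CMD_ID_LOAD_TA;

	ret = psp_ta_init_shared_buf(psp, &ctx->mem_context);
	if (ret)
		return ret;

	ret = psp_ta_load(psp, ctx);
	if (ret)
		goto out_free;

	ret = psp_ta_invoke(psp, ta_cmd_id, ctx);

	psp_ta_unload(psp, ctx);
out_free:
	psp_ta_free_shared_buf(&ctx->mem_context);
	return ret;
}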
-static int psp_hw_start(struct psp_context *psp)
+int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
+}
+
+int psp_xgmi_terminate(struct psp_context *psp)
{
int ret;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 2) &&
+ adev->gmc.xgmi.connected_to_cpu))
+ return 0;
- ret = psp_bootloader_load_sysdrv(psp);
+ if (!psp->xgmi_context.context.initialized)
+ return 0;
+
+ ret = psp_ta_unload(psp, &psp->xgmi_context.context);
+
+ psp->xgmi_context.context.initialized = false;
+
+ return ret;
+}
+
+int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+ if (!psp->ta_fw ||
+ !psp->xgmi_context.context.bin_desc.size_bytes ||
+ !psp->xgmi_context.context.bin_desc.start_addr)
+ return -ENOENT;
+
+ if (!load_ta)
+ goto invoke;
+
+ psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
+ psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
+ if (!psp->xgmi_context.context.mem_context.shared_buf) {
+ ret = psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
+ if (ret)
+ return ret;
+ }
+
+ /* Load XGMI TA */
+ ret = psp_ta_load(psp, &psp->xgmi_context.context);
+ if (!ret)
+ psp->xgmi_context.context.initialized = true;
+ else
+ return ret;
+
+invoke:
+ /* Initialize XGMI session */
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+ xgmi_cmd->flag_extend_link_record = set_extended_data;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
+
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+ /* note down the capability flag for the XGMI TA */
+ psp->xgmi_context.xgmi_ta_caps = xgmi_cmd->caps_flag;
+
+ return ret;
+}
+
+int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
+
+ /* Invoke xgmi ta to get hive id */
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
return ret;
- ret = psp_bootloader_load_sos(psp);
+ *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+
+ return 0;
+}
+
+int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
+
+ /* Invoke xgmi ta to get the node id */
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
if (ret)
return ret;
- ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
+ *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+
+ return 0;
+}
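A hedged sketch of the XGMI TA query flow built from the helpers above; it only logs the results, purely for illustration:

static void example_xgmi_ids(struct psp_context *psp)
{
	uint64_t hive_id = 0, node_id = 0;

	if (psp_xgmi_initialize(psp, false, true))
		return;

	if (!psp_xgmi_get_hive_id(psp, &hive_id) &&
	    !psp_xgmi_get_node_id(psp, &node_id))
		dev_info(psp->adev->dev, "example: hive 0x%llx node 0x%llx\n",
			 hive_id, node_id);

	psp_xgmi_terminate(psp);
}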
+
+static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
+{
+ return (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
+ IP_VERSION(13, 0, 2) &&
+ psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b) ||
+ amdgpu_ip_version(psp->adev, MP0_HWIP, 0) >=
+ IP_VERSION(13, 0, 6);
+}
+
+/*
+ * Chips that support extended topology information require the driver to
+ * reflect topology information in the opposite direction. This is
+ * because the TA has already exceeded its link record limit and if the
+ * TA holds bi-directional information, the driver would have to do
+ * multiple fetches instead of just two.
+ */
+static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
+ struct psp_xgmi_node_info node_info)
+{
+ struct amdgpu_device *mirror_adev;
+ struct amdgpu_hive_info *hive;
+ uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
+ uint64_t dst_node_id = node_info.node_id;
+ uint8_t dst_num_hops = node_info.num_hops;
+ uint8_t dst_num_links = node_info.num_links;
+
+ hive = amdgpu_get_xgmi_hive(psp->adev);
+ if (WARN_ON(!hive))
+ return;
+
+ list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
+ struct psp_xgmi_topology_info *mirror_top_info;
+ int j;
+
+ if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
+ continue;
+
+ mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
+ for (j = 0; j < mirror_top_info->num_nodes; j++) {
+ if (mirror_top_info->nodes[j].node_id != src_node_id)
+ continue;
+
+ mirror_top_info->nodes[j].num_hops = dst_num_hops;
+ /*
+ * prevent 0 num_links value re-reflection since the reflection
+ * criterion is based on num_hops (direct or indirect).
+ */
+ if (dst_num_links)
+ mirror_top_info->nodes[j].num_links = dst_num_links;
+
+ break;
+ }
+
+ break;
+ }
+
+ amdgpu_put_xgmi_hive(hive);
+}
+
+int psp_xgmi_get_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology,
+ bool get_extended_data)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+ struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
+ int i;
+ int ret;
+
+ if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+ return -EINVAL;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+ xgmi_cmd->flag_extend_link_record = get_extended_data;
+
+ /* Fill in the shared memory with topology information as input */
+ topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_TOPOLOGY_INFO;
+ topology_info_input->num_nodes = number_devices;
+
+ for (i = 0; i < topology_info_input->num_nodes; i++) {
+ topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+ topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+ topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+ topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+ }
+
+ /* Invoke xgmi ta to get the topology information */
+ ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_TOPOLOGY_INFO);
if (ret)
return ret;
- ret = psp_tmr_load(psp);
+ /* Read the output topology information from the shared memory */
+ topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
+ topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
+ for (i = 0; i < topology->num_nodes; i++) {
+ /* extended data will either be 0 or equal to non-extended data */
+ if (topology_info_output->nodes[i].num_hops)
+ topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
+
+ /* non-extended data gets everything here so no need to update */
+ if (!get_extended_data) {
+ topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
+ topology->nodes[i].is_sharing_enabled =
+ topology_info_output->nodes[i].is_sharing_enabled;
+ topology->nodes[i].sdma_engine =
+ topology_info_output->nodes[i].sdma_engine;
+ }
+
+ }
+
+ /* Invoke xgmi ta again to get the link information */
+ if (psp_xgmi_peer_link_info_supported(psp)) {
+ struct ta_xgmi_cmd_get_peer_link_info *link_info_output;
+ struct ta_xgmi_cmd_get_extend_peer_link_info *link_extend_info_output;
+ bool requires_reflection =
+ (psp->xgmi_context.supports_extended_data &&
+ get_extended_data) ||
+ amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
+ IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(psp->adev, MP0_HWIP, 0) ==
+ IP_VERSION(13, 0, 14);
+ bool ta_port_num_support = amdgpu_sriov_vf(psp->adev) ? 0 :
+ psp->xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG;
+
+ /* populate the shared output buffer rather than the cmd input buffer
+ * with node_ids as the input for GET_PEER_LINKS command execution.
+ * This is required for GET_PEER_LINKS per the xgmi ta implementation.
+ * The same requirement applies to the GET_EXTEND_PEER_LINKS command.
+ */
+ if (ta_port_num_support) {
+ link_extend_info_output = &xgmi_cmd->xgmi_out_message.get_extend_link_info;
+
+ for (i = 0; i < topology->num_nodes; i++)
+ link_extend_info_output->nodes[i].node_id = topology->nodes[i].node_id;
+
+ link_extend_info_output->num_nodes = topology->num_nodes;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_EXTEND_PEER_LINKS;
+ } else {
+ link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
+
+ for (i = 0; i < topology->num_nodes; i++)
+ link_info_output->nodes[i].node_id = topology->nodes[i].node_id;
+
+ link_info_output->num_nodes = topology->num_nodes;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_PEER_LINKS;
+ }
+
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < topology->num_nodes; i++) {
+ uint8_t node_num_links = ta_port_num_support ?
+ link_extend_info_output->nodes[i].num_links : link_info_output->nodes[i].num_links;
+ /* accumulate num_links on extended data */
+ if (get_extended_data) {
+ topology->nodes[i].num_links = topology->nodes[i].num_links + node_num_links;
+ } else {
+ topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
+ topology->nodes[i].num_links : node_num_links;
+ }
+ /* populate the connected port num info if supported and available */
+ if (ta_port_num_support && topology->nodes[i].num_links) {
+ memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
+ sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
+ }
+
+ /* reflect the topology information for bi-directionality */
+ if (requires_reflection && topology->nodes[i].num_hops)
+ psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
+ }
+ }
+
+ return 0;
+}
+
+int psp_xgmi_set_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+ int i;
+
+ if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+ return -EINVAL;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
+ topology_info_input->num_nodes = number_devices;
+
+ for (i = 0; i < topology_info_input->num_nodes; i++) {
+ topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+ topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+ topology_info_input->nodes[i].is_sharing_enabled = 1;
+ topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+ }
+
+ /* Invoke xgmi ta to set topology information */
+ return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
+}
+
+// ras begin
+static void psp_ras_ta_check_status(struct psp_context *psp)
+{
+ struct ta_ras_shared_memory *ras_cmd =
+ (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+
+ switch (ras_cmd->ras_status) {
+ case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
+ dev_warn(psp->adev->dev,
+ "RAS WARNING: cmd failed due to unsupported ip\n");
+ break;
+ case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
+ dev_warn(psp->adev->dev,
+ "RAS WARNING: cmd failed due to unsupported error injection\n");
+ break;
+ case TA_RAS_STATUS__SUCCESS:
+ break;
+ case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED:
+ if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR)
+ dev_warn(psp->adev->dev,
+ "RAS WARNING: Inject error to critical region is not allowed\n");
+ break;
+ default:
+ dev_warn(psp->adev->dev,
+ "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
+ break;
+ }
+}
+
+static int psp_ras_send_cmd(struct psp_context *psp,
+ enum ras_command cmd_id, void *in, void *out)
+{
+ struct ta_ras_shared_memory *ras_cmd;
+ uint32_t cmd = cmd_id;
+ int ret = 0;
+
+ if (!in)
+ return -EINVAL;
+
+ mutex_lock(&psp->ras_context.mutex);
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ switch (cmd) {
+ case TA_RAS_COMMAND__ENABLE_FEATURES:
+ case TA_RAS_COMMAND__DISABLE_FEATURES:
+ memcpy(&ras_cmd->ras_in_message,
+ in, sizeof(ras_cmd->ras_in_message));
+ break;
+ case TA_RAS_COMMAND__TRIGGER_ERROR:
+ memcpy(&ras_cmd->ras_in_message.trigger_error,
+ in, sizeof(ras_cmd->ras_in_message.trigger_error));
+ break;
+ case TA_RAS_COMMAND__QUERY_ADDRESS:
+ memcpy(&ras_cmd->ras_in_message.address,
+ in, sizeof(ras_cmd->ras_in_message.address));
+ break;
+ default:
+ dev_err(psp->adev->dev, "Invalid ras cmd id: %u\n", cmd);
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ ras_cmd->cmd_id = cmd;
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+
+ switch (cmd) {
+ case TA_RAS_COMMAND__TRIGGER_ERROR:
+ if (!ret && out)
+ memcpy(out, &ras_cmd->ras_status, sizeof(ras_cmd->ras_status));
+ break;
+ case TA_RAS_COMMAND__QUERY_ADDRESS:
+ if (ret || ras_cmd->ras_status || psp->cmd_buf_mem->resp.status)
+ ret = -EINVAL;
+ else if (out)
+ memcpy(out,
+ &ras_cmd->ras_out_message.address,
+ sizeof(ras_cmd->ras_out_message.address));
+ break;
+ default:
+ break;
+ }
+
+err_out:
+ mutex_unlock(&psp->ras_context.mutex);
+
+ return ret;
+}
+
+int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
+
+ if (amdgpu_ras_intr_triggered())
+ return ret;
+
+ if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
+ dev_warn(psp->adev->dev, "RAS: Unsupported Interface\n");
+ return -EINVAL;
+ }
+
+ if (!ret) {
+ if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
+ dev_warn(psp->adev->dev, "ECC switch disabled\n");
+
+ ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
+ } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
+ dev_warn(psp->adev->dev,
+ "RAS internal register access blocked\n");
+
+ psp_ras_ta_check_status(psp);
+ }
+
+ return ret;
+}
+
+int psp_ras_enable_features(struct psp_context *psp,
+ union ta_ras_cmd_input *info, bool enable)
+{
+ enum ras_command cmd_id;
+ int ret;
+
+ if (!psp->ras_context.context.initialized || !info)
+ return -EINVAL;
+
+ cmd_id = enable ?
+ TA_RAS_COMMAND__ENABLE_FEATURES : TA_RAS_COMMAND__DISABLE_FEATURES;
+ ret = psp_ras_send_cmd(psp, cmd_id, info, NULL);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+int psp_ras_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->ras_context.context.initialized)
+ return 0;
+
+ ret = psp_ta_unload(psp, &psp->ras_context.context);
+
+ psp->ras_context.context.initialized = false;
+
+ mutex_destroy(&psp->ras_context.mutex);
+
+ return ret;
+}
+
+int psp_ras_initialize(struct psp_context *psp)
+{
+ int ret;
+ uint32_t boot_cfg = 0xFF;
+ struct amdgpu_device *adev = psp->adev;
+ struct ta_ras_shared_memory *ras_cmd;
+
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
+ !adev->psp.ras_context.context.bin_desc.start_addr) {
+ dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
+ return 0;
+ }
+
+ if (amdgpu_atomfirmware_dynamic_boot_config_supported(adev)) {
+ /* query GECC enablement status from boot config
+ * boot_cfg: 1: GECC is enabled or 0: GECC is disabled
+ */
+ ret = psp_boot_config_get(adev, &boot_cfg);
+ if (ret)
+ dev_warn(adev->dev, "PSP get boot config failed\n");
+
+ if (boot_cfg == 1 && !adev->ras_default_ecc_enabled &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
+ dev_warn(adev->dev, "GECC is currently enabled, which may affect performance\n");
+ dev_warn(adev->dev,
+ "To disable GECC, please reboot the system and load the amdgpu driver with the parameter amdgpu_ras_enable=0\n");
+ } else {
+ if ((adev->ras_default_ecc_enabled || amdgpu_ras_enable == 1) &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
+ if (boot_cfg == 1) {
+ dev_info(adev->dev, "GECC is enabled\n");
+ } else {
+ /* enable GECC in next boot cycle if it is disabled
+ * in boot config, or force enable GECC if failed to
+ * get boot configuration
+ */
+ ret = psp_boot_config_set(adev, BOOT_CONFIG_GECC);
+ if (ret)
+ dev_warn(adev->dev, "PSP set boot config failed\n");
+ else
+ dev_warn(adev->dev, "GECC will be enabled in next boot cycle\n");
+ }
+ } else {
+ if (!boot_cfg) {
+ if (!adev->ras_default_ecc_enabled &&
+ amdgpu_ras_enable != 1 &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+ dev_warn(adev->dev, "GECC is disabled, set amdgpu_ras_enable=1 to enable GECC in next boot cycle if needed\n");
+ else
+ dev_info(adev->dev, "GECC is disabled\n");
+ } else {
+ /* disable GECC in next boot cycle if ras is
+ * disabled by module parameter amdgpu_ras_enable
+ * and/or amdgpu_ras_mask, or the boot_config_get call
+ * failed
+ */
+ ret = psp_boot_config_set(adev, 0);
+ if (ret)
+ dev_warn(adev->dev, "PSP set boot config failed\n");
+ else
+ dev_warn(adev->dev, "GECC will be disabled in next boot cycle if set amdgpu_ras_enable and/or amdgpu_ras_mask to 0x0\n");
+ }
+ }
+ }
+ }
+
+ psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
+ psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
+ if (!psp->ras_context.context.mem_context.shared_buf) {
+ ret = psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
+ if (ret)
+ return ret;
+ }
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ if (amdgpu_ras_is_poison_mode_supported(adev))
+ ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu)
+ ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
+ ras_cmd->ras_in_message.init_flags.xcc_mask =
+ adev->gfx.xcc_mask;
+ ras_cmd->ras_in_message.init_flags.channel_dis_num = hweight32(adev->gmc.m_half_use) * 2;
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ ras_cmd->ras_in_message.init_flags.nps_mode =
+ adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ ras_cmd->ras_in_message.init_flags.active_umc_mask = adev->umc.active_mask;
+
+ ret = psp_ta_load(psp, &psp->ras_context.context);
+
+ if (!ret && !ras_cmd->ras_status) {
+ psp->ras_context.context.initialized = true;
+ mutex_init(&psp->ras_context.mutex);
+ } else {
+ if (ras_cmd->ras_status)
+ dev_warn(adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
+
+ /* fail to load RAS TA */
+ psp->ras_context.context.initialized = false;
+ }
+
+ return ret;
+}
+
+int psp_ras_trigger_error(struct psp_context *psp,
+ struct ta_ras_trigger_error_input *info, uint32_t instance_mask)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+ uint32_t dev_mask;
+ uint32_t ras_status = 0;
+
+ if (!psp->ras_context.context.initialized || !info)
+ return -EINVAL;
+
+ switch (info->block_id) {
+ case TA_RAS_BLOCK__GFX:
+ dev_mask = GET_MASK(GC, instance_mask);
+ break;
+ case TA_RAS_BLOCK__SDMA:
+ dev_mask = GET_MASK(SDMA0, instance_mask);
+ break;
+ case TA_RAS_BLOCK__VCN:
+ case TA_RAS_BLOCK__JPEG:
+ dev_mask = GET_MASK(VCN, instance_mask);
+ break;
+ default:
+ dev_mask = instance_mask;
+ break;
+ }
+
+ /* reuse sub_block_index for backward compatibility */
+ dev_mask <<= AMDGPU_RAS_INST_SHIFT;
+ dev_mask &= AMDGPU_RAS_INST_MASK;
+ info->sub_block_index |= dev_mask;
+
+ ret = psp_ras_send_cmd(psp,
+ TA_RAS_COMMAND__TRIGGER_ERROR, info, &ras_status);
if (ret)
+ return -EINVAL;
+
+ /* If err_event_athub occurs, the error injection was successful; however,
+ * the return status from the TA is no longer reliable
+ */
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
+ if (ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED)
+ return -EACCES;
+ else if (ras_status)
+ return -EINVAL;
+
+ return 0;
+}
+
+int psp_ras_query_address(struct psp_context *psp,
+ struct ta_ras_query_address_input *addr_in,
+ struct ta_ras_query_address_output *addr_out)
+{
+ int ret;
+
+ if (!psp->ras_context.context.initialized ||
+ !addr_in || !addr_out)
+ return -EINVAL;
+
+ ret = psp_ras_send_cmd(psp,
+ TA_RAS_COMMAND__QUERY_ADDRESS, addr_in, addr_out);
+
+ return ret;
+}
+// ras end
+
+// HDCP start
+static int psp_hdcp_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ /* bypass hdcp initialization if dmu is harvested */
+ if (!amdgpu_device_has_display_hardware(psp->adev))
+ return 0;
+
+ if (!psp->hdcp_context.context.bin_desc.size_bytes ||
+ !psp->hdcp_context.context.bin_desc.start_addr) {
+ dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
+ return 0;
+ }
+
+ psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
+ psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
+ if (!psp->hdcp_context.context.mem_context.shared_buf) {
+ ret = psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_ta_load(psp, &psp->hdcp_context.context);
+ if (!ret) {
+ psp->hdcp_context.context.initialized = true;
+ mutex_init(&psp->hdcp_context.mutex);
+ }
+
+ return ret;
+}
+
+int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ /*
+ * TODO: bypass the TA invocation in SRIOV for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->hdcp_context.context.initialized)
+ return 0;
+
+ return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
+}
+
+static int psp_hdcp_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass termination in SRIOV for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->hdcp_context.context.initialized)
+ return 0;
+
+ ret = psp_ta_unload(psp, &psp->hdcp_context.context);
+
+ psp->hdcp_context.context.initialized = false;
+
+ return ret;
+}
+// HDCP end
+
+// DTM start
+static int psp_dtm_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass initialization in SRIOV for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ /* bypass dtm initialization if dmu is harvested */
+ if (!amdgpu_device_has_display_hardware(psp->adev))
+ return 0;
+
+ if (!psp->dtm_context.context.bin_desc.size_bytes ||
+ !psp->dtm_context.context.bin_desc.start_addr) {
+ dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
+ return 0;
+ }
+
+ psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
+ psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
+ if (!psp->dtm_context.context.mem_context.shared_buf) {
+ ret = psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_ta_load(psp, &psp->dtm_context.context);
+ if (!ret) {
+ psp->dtm_context.context.initialized = true;
+ mutex_init(&psp->dtm_context.mutex);
+ }
+
+ return ret;
+}
+
+int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ /*
+ * TODO: bypass the TA invocation in SRIOV for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->dtm_context.context.initialized)
+ return 0;
+
+ return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
+}
+
+static int psp_dtm_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass termination in SRIOV for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->dtm_context.context.initialized)
+ return 0;
+
+ ret = psp_ta_unload(psp, &psp->dtm_context.context);
+
+ psp->dtm_context.context.initialized = false;
+
+ return ret;
+}
+// DTM end
+
+// RAP start
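+/*
+ * Load the optional RAP TA and immediately run TA_CMD_RAP__INITIALIZE.
+ * If either step fails, the TA is unloaded and its shared buffer is
+ * freed so no stale RAP context is left behind.
+ */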
+static int psp_rap_initialize(struct psp_context *psp)
+{
+ int ret;
+ enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
+
+ /*
+ * TODO: bypass initialization in SRIOV for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->rap_context.context.bin_desc.size_bytes ||
+ !psp->rap_context.context.bin_desc.start_addr) {
+ dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
+ return 0;
+ }
+
+ psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
+ psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
+ if (!psp->rap_context.context.mem_context.shared_buf) {
+ ret = psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_ta_load(psp, &psp->rap_context.context);
+ if (!ret) {
+ psp->rap_context.context.initialized = true;
+ mutex_init(&psp->rap_context.mutex);
+ } else
+ return ret;
+
+ ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
+ if (ret || status != TA_RAP_STATUS__SUCCESS) {
+ psp_rap_terminate(psp);
+ /* free rap shared memory */
+ psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
+
+ dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
+ ret, status);
+
return ret;
+ }
+
+ return 0;
+}
+
+static int psp_rap_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->rap_context.context.initialized)
+ return 0;
+
+ ret = psp_ta_unload(psp, &psp->rap_context.context);
+
+ psp->rap_context.context.initialized = false;
+
+ return ret;
+}
+
+int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
+{
+ struct ta_rap_shared_memory *rap_cmd;
+ int ret = 0;
+
+ if (!psp->rap_context.context.initialized)
+ return 0;
+
+ if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
+ ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
+ return -EINVAL;
+
+ mutex_lock(&psp->rap_context.mutex);
- ret = psp_asd_load(psp);
+ rap_cmd = (struct ta_rap_shared_memory *)
+ psp->rap_context.context.mem_context.shared_buf;
+ memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
+
+ rap_cmd->cmd_id = ta_cmd_id;
+ rap_cmd->validation_method_id = METHOD_A;
+
+ ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
if (ret)
+ goto out_unlock;
+
+ if (status)
+ *status = rap_cmd->rap_status;
+
+out_unlock:
+ mutex_unlock(&psp->rap_context.mutex);
+
+ return ret;
+}
+// RAP end
+
+/* securedisplay start */
+static int psp_securedisplay_initialize(struct psp_context *psp)
+{
+ int ret;
+ struct ta_securedisplay_cmd *securedisplay_cmd;
+
+ /*
+ * TODO: bypass initialization in SRIOV for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ /* bypass securedisplay initialization if dmu is harvested */
+ if (!amdgpu_device_has_display_hardware(psp->adev))
+ return 0;
+
+ if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
+ !psp->securedisplay_context.context.bin_desc.start_addr) {
+ dev_info(psp->adev->dev,
+ "SECUREDISPLAY: optional securedisplay ta ucode is not available\n");
+ return 0;
+ }
+
+ psp->securedisplay_context.context.mem_context.shared_mem_size =
+ PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
+ psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
+ if (!psp->securedisplay_context.context.initialized) {
+ ret = psp_ta_init_shared_buf(psp,
+ &psp->securedisplay_context.context.mem_context);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_ta_load(psp, &psp->securedisplay_context.context);
+ if (!ret && !psp->securedisplay_context.context.resp_status) {
+ psp->securedisplay_context.context.initialized = true;
+ mutex_init(&psp->securedisplay_context.mutex);
+ } else
return ret;
+ mutex_lock(&psp->securedisplay_context.mutex);
+
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+
+ mutex_unlock(&psp->securedisplay_context.mutex);
+
+ if (ret) {
+ psp_securedisplay_terminate(psp);
+ /* free securedisplay shared memory */
+ psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
+ dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
+ return -EINVAL;
+ }
+
+ if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
+ psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+ dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
+ securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
+ /* don't try again */
+ psp->securedisplay_context.context.bin_desc.size_bytes = 0;
+ }
+
return 0;
}
-static int psp_np_fw_load(struct psp_context *psp)
+static int psp_securedisplay_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * TODO: bypass termination in SRIOV for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->securedisplay_context.context.initialized)
+ return 0;
+
+ ret = psp_ta_unload(psp, &psp->securedisplay_context.context);
+
+ psp->securedisplay_context.context.initialized = false;
+
+ return ret;
+}
+
+int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+
+ if (!psp->securedisplay_context.context.initialized)
+ return -EINVAL;
+
+ if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
+ ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC &&
+ ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC_V2)
+ return -EINVAL;
+
+ ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
+
+ return ret;
+}
+/* SECUREDISPLAY end */
+
+int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+ int ret = 0;
+
+ if (!amdgpu_sriov_vf(adev) && psp->funcs && psp->funcs->wait_for_bootloader != NULL)
+ ret = psp->funcs->wait_for_bootloader(psp);
+
+ return ret;
+}
+
+bool amdgpu_psp_get_ras_capability(struct psp_context *psp)
+{
+ if (psp->funcs &&
+ psp->funcs->get_ras_capability) {
+ return psp->funcs->get_ras_capability(psp);
+ } else {
+ return false;
+ }
+}
+
+bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+
+ if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
+ return false;
+
+ if (psp->funcs && psp->funcs->is_reload_needed)
+ return psp->funcs->is_reload_needed(psp);
+
+ return false;
+}
+
+static void psp_update_gpu_addresses(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+
+ if (psp->cmd_buf_bo && psp->cmd_buf_mem) {
+ psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo);
+ psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo);
+ psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo);
+ }
+ if (adev->firmware.rbuf && psp->km_ring.ring_mem)
+ psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf);
+}
+
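+/*
+ * Bring up the PSP: on bare metal, walk the bootloader stages (kdb,
+ * spl, sysdrv, the soc/intf/dbg/ras/ipkeymgr/spdm drivers and finally
+ * sos), then create the KM ring, update the firmware reservation on
+ * first bring-up and set up/load the trusted memory region (TMR) as
+ * needed.  With DF c-state management centralized in PMFW, the SMU
+ * firmware is loaded before the TMR load.
+ */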
+static int psp_hw_start(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ if (amdgpu_virt_xgmi_migrate_enabled(adev))
+ psp_update_gpu_addresses(adev);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ if ((is_psp_fw_valid(psp->kdb)) &&
+ (psp->funcs->bootloader_load_kdb != NULL)) {
+ ret = psp_bootloader_load_kdb(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load kdb failed!\n");
+ return ret;
+ }
+ }
+
+ if ((is_psp_fw_valid(psp->spl)) &&
+ (psp->funcs->bootloader_load_spl != NULL)) {
+ ret = psp_bootloader_load_spl(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load spl failed!\n");
+ return ret;
+ }
+ }
+
+ if ((is_psp_fw_valid(psp->sys)) &&
+ (psp->funcs->bootloader_load_sysdrv != NULL)) {
+ ret = psp_bootloader_load_sysdrv(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load sys drv failed!\n");
+ return ret;
+ }
+ }
+
+ if ((is_psp_fw_valid(psp->soc_drv)) &&
+ (psp->funcs->bootloader_load_soc_drv != NULL)) {
+ ret = psp_bootloader_load_soc_drv(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load soc drv failed!\n");
+ return ret;
+ }
+ }
+
+ if ((is_psp_fw_valid(psp->intf_drv)) &&
+ (psp->funcs->bootloader_load_intf_drv != NULL)) {
+ ret = psp_bootloader_load_intf_drv(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load intf drv failed!\n");
+ return ret;
+ }
+ }
+
+ if ((is_psp_fw_valid(psp->dbg_drv)) &&
+ (psp->funcs->bootloader_load_dbg_drv != NULL)) {
+ ret = psp_bootloader_load_dbg_drv(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load dbg drv failed!\n");
+ return ret;
+ }
+ }
+
+ if ((is_psp_fw_valid(psp->ras_drv)) &&
+ (psp->funcs->bootloader_load_ras_drv != NULL)) {
+ ret = psp_bootloader_load_ras_drv(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load ras_drv failed!\n");
+ return ret;
+ }
+ }
+
+ if ((is_psp_fw_valid(psp->ipkeymgr_drv)) &&
+ (psp->funcs->bootloader_load_ipkeymgr_drv != NULL)) {
+ ret = psp_bootloader_load_ipkeymgr_drv(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load ipkeymgr_drv failed!\n");
+ return ret;
+ }
+ }
+
+ if ((is_psp_fw_valid(psp->spdm_drv)) &&
+ (psp->funcs->bootloader_load_spdm_drv != NULL)) {
+ ret = psp_bootloader_load_spdm_drv(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load spdm_drv failed!\n");
+ return ret;
+ }
+ }
+
+ if ((is_psp_fw_valid(psp->sos)) &&
+ (psp->funcs->bootloader_load_sos != NULL)) {
+ ret = psp_bootloader_load_sos(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load sos failed!\n");
+ return ret;
+ }
+ }
+ }
+
+ ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
+ if (ret) {
+ dev_err(adev->dev, "PSP create ring failed!\n");
+ return ret;
+ }
+
+ if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ ret = psp_update_fw_reservation(psp);
+ if (ret) {
+ dev_err(adev->dev, "update fw reservation failed!\n");
+ return ret;
+ }
+ }
+
+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
+ goto skip_pin_bo;
+
+ if (!psp->boot_time_tmr || psp->autoload_supported) {
+ ret = psp_tmr_init(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP tmr init failed!\n");
+ return ret;
+ }
+ }
+
+skip_pin_bo:
+ /*
+ * For ASICs with DF Cstate management centralized
+ * in PMFW, TMR setup should be performed after PMFW
+ * is loaded and before other non-PSP firmware is loaded.
+ */
+ if (psp->pmfw_centralized_cstate_management) {
+ ret = psp_load_smu_fw(psp);
+ if (ret)
+ return ret;
+ }
+
+ if (!psp->boot_time_tmr || !psp->autoload_supported) {
+ ret = psp_tmr_load(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load tmr failed!\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
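+/*
+ * Translate a driver-side AMDGPU_UCODE_ID into the PSP GFX firmware
+ * type carried in the LOAD_IP_FW command; unknown IDs are rejected.
+ */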
+static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
+ enum psp_gfx_fw_type *type)
+{
+ switch (ucode->ucode_id) {
+ case AMDGPU_UCODE_ID_CAP:
+ *type = GFX_FW_TYPE_CAP;
+ break;
+ case AMDGPU_UCODE_ID_SDMA0:
+ *type = GFX_FW_TYPE_SDMA0;
+ break;
+ case AMDGPU_UCODE_ID_SDMA1:
+ *type = GFX_FW_TYPE_SDMA1;
+ break;
+ case AMDGPU_UCODE_ID_SDMA2:
+ *type = GFX_FW_TYPE_SDMA2;
+ break;
+ case AMDGPU_UCODE_ID_SDMA3:
+ *type = GFX_FW_TYPE_SDMA3;
+ break;
+ case AMDGPU_UCODE_ID_SDMA4:
+ *type = GFX_FW_TYPE_SDMA4;
+ break;
+ case AMDGPU_UCODE_ID_SDMA5:
+ *type = GFX_FW_TYPE_SDMA5;
+ break;
+ case AMDGPU_UCODE_ID_SDMA6:
+ *type = GFX_FW_TYPE_SDMA6;
+ break;
+ case AMDGPU_UCODE_ID_SDMA7:
+ *type = GFX_FW_TYPE_SDMA7;
+ break;
+ case AMDGPU_UCODE_ID_CP_MES:
+ *type = GFX_FW_TYPE_CP_MES;
+ break;
+ case AMDGPU_UCODE_ID_CP_MES_DATA:
+ *type = GFX_FW_TYPE_MES_STACK;
+ break;
+ case AMDGPU_UCODE_ID_CP_MES1:
+ *type = GFX_FW_TYPE_CP_MES_KIQ;
+ break;
+ case AMDGPU_UCODE_ID_CP_MES1_DATA:
+ *type = GFX_FW_TYPE_MES_KIQ_STACK;
+ break;
+ case AMDGPU_UCODE_ID_CP_CE:
+ *type = GFX_FW_TYPE_CP_CE;
+ break;
+ case AMDGPU_UCODE_ID_CP_PFP:
+ *type = GFX_FW_TYPE_CP_PFP;
+ break;
+ case AMDGPU_UCODE_ID_CP_ME:
+ *type = GFX_FW_TYPE_CP_ME;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ *type = GFX_FW_TYPE_CP_MEC;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1_JT:
+ *type = GFX_FW_TYPE_CP_MEC_ME1;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC2:
+ *type = GFX_FW_TYPE_CP_MEC;
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC2_JT:
+ *type = GFX_FW_TYPE_CP_MEC_ME2;
+ break;
+ case AMDGPU_UCODE_ID_RLC_P:
+ *type = GFX_FW_TYPE_RLC_P;
+ break;
+ case AMDGPU_UCODE_ID_RLC_V:
+ *type = GFX_FW_TYPE_RLC_V;
+ break;
+ case AMDGPU_UCODE_ID_RLC_G:
+ *type = GFX_FW_TYPE_RLC_G;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
+ *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_CNTL;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
+ *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+ *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM;
+ break;
+ case AMDGPU_UCODE_ID_RLC_IRAM:
+ *type = GFX_FW_TYPE_RLC_IRAM;
+ break;
+ case AMDGPU_UCODE_ID_RLC_DRAM:
+ *type = GFX_FW_TYPE_RLC_DRAM_BOOT;
+ break;
+ case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
+ *type = GFX_FW_TYPE_GLOBAL_TAP_DELAYS;
+ break;
+ case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
+ *type = GFX_FW_TYPE_SE0_TAP_DELAYS;
+ break;
+ case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
+ *type = GFX_FW_TYPE_SE1_TAP_DELAYS;
+ break;
+ case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
+ *type = GFX_FW_TYPE_SE2_TAP_DELAYS;
+ break;
+ case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
+ *type = GFX_FW_TYPE_SE3_TAP_DELAYS;
+ break;
+ case AMDGPU_UCODE_ID_SMC:
+ *type = GFX_FW_TYPE_SMU;
+ break;
+ case AMDGPU_UCODE_ID_PPTABLE:
+ *type = GFX_FW_TYPE_PPTABLE;
+ break;
+ case AMDGPU_UCODE_ID_UVD:
+ *type = GFX_FW_TYPE_UVD;
+ break;
+ case AMDGPU_UCODE_ID_UVD1:
+ *type = GFX_FW_TYPE_UVD1;
+ break;
+ case AMDGPU_UCODE_ID_VCE:
+ *type = GFX_FW_TYPE_VCE;
+ break;
+ case AMDGPU_UCODE_ID_VCN:
+ *type = GFX_FW_TYPE_VCN;
+ break;
+ case AMDGPU_UCODE_ID_VCN1:
+ *type = GFX_FW_TYPE_VCN1;
+ break;
+ case AMDGPU_UCODE_ID_DMCU_ERAM:
+ *type = GFX_FW_TYPE_DMCU_ERAM;
+ break;
+ case AMDGPU_UCODE_ID_DMCU_INTV:
+ *type = GFX_FW_TYPE_DMCU_ISR;
+ break;
+ case AMDGPU_UCODE_ID_VCN0_RAM:
+ *type = GFX_FW_TYPE_VCN0_RAM;
+ break;
+ case AMDGPU_UCODE_ID_VCN1_RAM:
+ *type = GFX_FW_TYPE_VCN1_RAM;
+ break;
+ case AMDGPU_UCODE_ID_DMCUB:
+ *type = GFX_FW_TYPE_DMUB;
+ break;
+ case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
+ case AMDGPU_UCODE_ID_SDMA_RS64:
+ *type = GFX_FW_TYPE_SDMA_UCODE_TH0;
+ break;
+ case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
+ *type = GFX_FW_TYPE_SDMA_UCODE_TH1;
+ break;
+ case AMDGPU_UCODE_ID_IMU_I:
+ *type = GFX_FW_TYPE_IMU_I;
+ break;
+ case AMDGPU_UCODE_ID_IMU_D:
+ *type = GFX_FW_TYPE_IMU_D;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP:
+ *type = GFX_FW_TYPE_RS64_PFP;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME:
+ *type = GFX_FW_TYPE_RS64_ME;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC:
+ *type = GFX_FW_TYPE_RS64_MEC;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
+ *type = GFX_FW_TYPE_RS64_PFP_P0_STACK;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
+ *type = GFX_FW_TYPE_RS64_PFP_P1_STACK;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
+ *type = GFX_FW_TYPE_RS64_ME_P0_STACK;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
+ *type = GFX_FW_TYPE_RS64_ME_P1_STACK;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
+ *type = GFX_FW_TYPE_RS64_MEC_P0_STACK;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
+ *type = GFX_FW_TYPE_RS64_MEC_P1_STACK;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
+ *type = GFX_FW_TYPE_RS64_MEC_P2_STACK;
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
+ *type = GFX_FW_TYPE_RS64_MEC_P3_STACK;
+ break;
+ case AMDGPU_UCODE_ID_VPE_CTX:
+ *type = GFX_FW_TYPE_VPEC_FW1;
+ break;
+ case AMDGPU_UCODE_ID_VPE_CTL:
+ *type = GFX_FW_TYPE_VPEC_FW2;
+ break;
+ case AMDGPU_UCODE_ID_VPE:
+ *type = GFX_FW_TYPE_VPE;
+ break;
+ case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
+ *type = GFX_FW_TYPE_UMSCH_UCODE;
+ break;
+ case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
+ *type = GFX_FW_TYPE_UMSCH_DATA;
+ break;
+ case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
+ *type = GFX_FW_TYPE_UMSCH_CMD_BUFFER;
+ break;
+ case AMDGPU_UCODE_ID_P2S_TABLE:
+ *type = GFX_FW_TYPE_P2S_TABLE;
+ break;
+ case AMDGPU_UCODE_ID_JPEG_RAM:
+ *type = GFX_FW_TYPE_JPEG_RAM;
+ break;
+ case AMDGPU_UCODE_ID_ISP:
+ *type = GFX_FW_TYPE_ISP;
+ break;
+ case AMDGPU_UCODE_ID_MAXIMUM:
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void psp_print_fw_hdr(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode)
+{
+ struct amdgpu_device *adev = psp->adev;
+ struct common_firmware_header *hdr;
+
+ switch (ucode->ucode_id) {
+ case AMDGPU_UCODE_ID_SDMA0:
+ case AMDGPU_UCODE_ID_SDMA1:
+ case AMDGPU_UCODE_ID_SDMA2:
+ case AMDGPU_UCODE_ID_SDMA3:
+ case AMDGPU_UCODE_ID_SDMA4:
+ case AMDGPU_UCODE_ID_SDMA5:
+ case AMDGPU_UCODE_ID_SDMA6:
+ case AMDGPU_UCODE_ID_SDMA7:
+ hdr = (struct common_firmware_header *)
+ adev->sdma.instance[ucode->ucode_id - AMDGPU_UCODE_ID_SDMA0].fw->data;
+ amdgpu_ucode_print_sdma_hdr(hdr);
+ break;
+ case AMDGPU_UCODE_ID_CP_CE:
+ hdr = (struct common_firmware_header *)adev->gfx.ce_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
+ break;
+ case AMDGPU_UCODE_ID_CP_PFP:
+ hdr = (struct common_firmware_header *)adev->gfx.pfp_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
+ break;
+ case AMDGPU_UCODE_ID_CP_ME:
+ hdr = (struct common_firmware_header *)adev->gfx.me_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ hdr = (struct common_firmware_header *)adev->gfx.mec_fw->data;
+ amdgpu_ucode_print_gfx_hdr(hdr);
+ break;
+ case AMDGPU_UCODE_ID_RLC_G:
+ hdr = (struct common_firmware_header *)adev->gfx.rlc_fw->data;
+ amdgpu_ucode_print_rlc_hdr(hdr);
+ break;
+ case AMDGPU_UCODE_ID_SMC:
+ hdr = (struct common_firmware_header *)adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(hdr);
+ break;
+ default:
+ break;
+ }
+}
+
+static int psp_prep_load_ip_fw_cmd_buf(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode,
+ struct psp_gfx_cmd_resp *cmd)
+{
+ int ret;
+ uint64_t fw_mem_mc_addr = ucode->mc_addr;
+
+ cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
+ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
+ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
+ cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
+
+ ret = psp_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
+ if (ret)
+ dev_err(psp->adev->dev, "Unknown firmware type\n");
+
+ return ret;
+}
+
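+/*
+ * Build a GFX_CMD_ID_LOAD_IP_FW command for one firmware image using
+ * the shared command buffer and submit it on the PSP ring.
+ */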
+int psp_execute_ip_fw_load(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode)
+{
+ int ret = 0;
+ struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
+
+ ret = psp_prep_load_ip_fw_cmd_buf(psp, ucode, cmd);
+ if (!ret) {
+ ret = psp_cmd_submit_buf(psp, ucode, cmd,
+ psp->fence_buf_mc_addr);
+ }
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
+
+static int psp_load_p2s_table(struct psp_context *psp)
+{
+ int ret;
+ struct amdgpu_device *adev = psp->adev;
+ struct amdgpu_firmware_info *ucode =
+ &adev->firmware.ucode[AMDGPU_UCODE_ID_P2S_TABLE];
+
+ if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
+ return 0;
+
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14)) {
+ uint32_t supp_vers = adev->flags & AMD_IS_APU ? 0x0036013D :
+ 0x0036003C;
+ if (psp->sos.fw_version < supp_vers)
+ return 0;
+ }
+
+ if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ ret = psp_execute_ip_fw_load(psp, ucode);
+
+ return ret;
+}
+
+static int psp_load_smu_fw(struct psp_context *psp)
+{
+ int ret;
+ struct amdgpu_device *adev = psp->adev;
+ struct amdgpu_firmware_info *ucode =
+ &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
+ struct amdgpu_ras *ras = psp->ras_context.ras;
+
+ /*
+ * Skip SMU FW reloading in case of using BACO for runpm only,
+ * as SMU is always alive.
+ */
+ if (adev->in_runpm && ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
+ (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)))
+ return 0;
+
+ if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if ((amdgpu_in_reset(adev) && ras && adev->ras_enabled &&
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 4) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(11, 0, 2)))) {
+ ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
+ if (ret)
+ dev_err(adev->dev, "Failed to set MP1 state prepare for reload\n");
+ }
+
+ ret = psp_execute_ip_fw_load(psp, ucode);
+
+ if (ret)
+ dev_err(adev->dev, "PSP load smu failed!\n");
+
+ return ret;
+}
+
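+/*
+ * Decide whether a firmware image should not be sent to the PSP:
+ * empty images, the P2S table (loaded separately), SMU firmware when
+ * it is reloaded elsewhere or managed by PMFW, images skipped under
+ * SR-IOV, and the MEC JT images when RLC autoload is in use.
+ */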
+static bool fw_load_skip_check(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode)
+{
+ if (!ucode->fw || !ucode->ucode_size)
+ return true;
+
+ if (ucode->ucode_id == AMDGPU_UCODE_ID_P2S_TABLE)
+ return true;
+
+ if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
+ (psp_smu_reload_quirk(psp) ||
+ psp->autoload_supported ||
+ psp->pmfw_centralized_cstate_management))
+ return true;
+
+ if (amdgpu_sriov_vf(psp->adev) &&
+ amdgpu_virt_fw_load_skip_check(psp->adev, ucode->ucode_id))
+ return true;
+
+ if (psp->autoload_supported &&
+ (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
+ ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
+ /* skip mec JT when autoload is enabled */
+ return true;
+
+ return false;
+}
+
+int psp_load_fw_list(struct psp_context *psp,
+ struct amdgpu_firmware_info **ucode_list, int ucode_count)
+{
+ int ret = 0, i;
+ struct amdgpu_firmware_info *ucode;
+
+ for (i = 0; i < ucode_count; ++i) {
+ ucode = ucode_list[i];
+ psp_print_fw_hdr(psp, ucode);
+ ret = psp_execute_ip_fw_load(psp, ucode);
+ if (ret)
+ return ret;
+ }
+ return ret;
+}
+
+static int psp_load_non_psp_fw(struct psp_context *psp)
{
int i, ret;
struct amdgpu_firmware_info *ucode;
- struct amdgpu_device* adev = psp->adev;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (psp->autoload_supported &&
+ !psp->pmfw_centralized_cstate_management) {
+ ret = psp_load_smu_fw(psp);
+ if (ret)
+ return ret;
+ }
+
+ /* Load P2S table first if it's available */
+ psp_load_p2s_table(psp);
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
- if (!ucode->fw)
- continue;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
- psp_smu_reload_quirk(psp))
+ !fw_load_skip_check(psp, ucode)) {
+ ret = psp_load_smu_fw(psp);
+ if (ret)
+ return ret;
continue;
- if (amdgpu_sriov_vf(adev) &&
- (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G))
- /*skip ucode loading in SRIOV VF */
+ }
+
+ if (fw_load_skip_check(psp, ucode))
continue;
- ret = psp_prep_cmd_buf(ucode, psp->cmd);
- if (ret)
- return ret;
+ if (psp->autoload_supported &&
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
+ IP_VERSION(11, 0, 7) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) ==
+ IP_VERSION(11, 0, 11) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) ==
+ IP_VERSION(11, 0, 12)) &&
+ (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
+ ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
+ ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
+ /* PSP only receives one SDMA fw for sienna_cichlid,
+ * as all four SDMA fw images are the same
+ */
+ continue;
- ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
- psp->fence_buf_mc_addr, i + 3);
+ psp_print_fw_hdr(psp, ucode);
+
+ ret = psp_execute_ip_fw_load(psp, ucode);
if (ret)
return ret;
-#if 0
- /* check if firmware loaded sucessfully */
- if (!amdgpu_psp_check_fw_loading_status(adev, i))
- return -EINVAL;
-#endif
+ /* Start RLC autoload after the PSP has received all the GFX firmware */
+ if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
+ adev->virt.autoload_ucode_id : AMDGPU_UCODE_ID_RLC_G)) {
+ ret = psp_rlc_autoload_start(psp);
+ if (ret) {
+ dev_err(adev->dev, "Failed to start rlc autoload\n");
+ return ret;
+ }
+ }
}
return 0;
@@ -319,88 +3098,107 @@ static int psp_load_fw(struct amdgpu_device *adev)
{
int ret;
struct psp_context *psp = &adev->psp;
- struct psp_gfx_cmd_resp *cmd;
- cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
+ /* should not destroy ring, only stop */
+ psp_ring_stop(psp, PSP_RING_TYPE__KM);
+ } else {
+ memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
- psp->cmd = cmd;
+ ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
+ if (ret) {
+ dev_err(adev->dev, "PSP ring init failed!\n");
+ goto failed;
+ }
+ }
- ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
- AMDGPU_GEM_DOMAIN_GTT,
- &psp->fw_pri_bo,
- &psp->fw_pri_mc_addr,
- &psp->fw_pri_buf);
+ ret = psp_hw_start(psp);
if (ret)
goto failed;
- ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &psp->fence_buf_bo,
- &psp->fence_buf_mc_addr,
- &psp->fence_buf);
+ ret = psp_load_non_psp_fw(psp);
if (ret)
- goto failed_mem1;
+ goto failed1;
- memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
+ ret = psp_asd_initialize(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load asd failed!\n");
+ goto failed1;
+ }
- ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
- if (ret)
- goto failed_mem1;
+ ret = psp_rl_load(adev);
+ if (ret) {
+ dev_err(adev->dev, "PSP load RL failed!\n");
+ goto failed1;
+ }
- ret = psp_tmr_init(psp);
- if (ret)
- goto failed_mem;
+ if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) {
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ ret = psp_xgmi_initialize(psp, false, true);
+ /* Warn on XGMI session initialization failure
+ * instead of stopping driver initialization
+ */
+ if (ret)
+ dev_err(psp->adev->dev,
+ "XGMI: Failed to initialize XGMI session\n");
+ }
+ }
- ret = psp_asd_init(psp);
- if (ret)
- goto failed_mem;
+ if (psp->ta_fw) {
+ ret = psp_ras_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "RAS: Failed to initialize RAS\n");
- ret = psp_hw_start(psp);
- if (ret)
- goto failed_mem;
+ ret = psp_hdcp_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "HDCP: Failed to initialize HDCP\n");
- ret = psp_np_fw_load(psp);
- if (ret)
- goto failed_mem;
+ ret = psp_dtm_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "DTM: Failed to initialize DTM\n");
- kfree(cmd);
+ ret = psp_rap_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "RAP: Failed to initialize RAP\n");
+
+ ret = psp_securedisplay_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
+ }
return 0;
-failed_mem:
- amdgpu_bo_free_kernel(&psp->fence_buf_bo,
- &psp->fence_buf_mc_addr, &psp->fence_buf);
-failed_mem1:
- amdgpu_bo_free_kernel(&psp->fw_pri_bo,
- &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
+failed1:
+ psp_free_shared_bufs(psp);
failed:
- kfree(cmd);
+ /*
+ * All cleanup jobs (xgmi terminate, ras terminate,
+ * ring destroy, cmd/fence/fw buffer destroy,
+ * psp->cmd destroy) are delayed to psp_hw_fini
+ */
+ psp_ring_destroy(psp, PSP_RING_TYPE__KM);
return ret;
}
-static int psp_hw_init(void *handle)
+static int psp_hw_init(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
- return 0;
+ struct amdgpu_device *adev = ip_block->adev;
mutex_lock(&adev->firmware.mutex);
- /*
- * This sequence is just used on hw_init only once, no need on
- * resume.
- */
+
ret = amdgpu_ucode_init_bo(adev);
if (ret)
goto failed;
ret = psp_load_fw(adev);
if (ret) {
- DRM_ERROR("PSP firmware loading failed\n");
+ dev_err(adev->dev, "PSP firmware loading failed\n");
goto failed;
}
@@ -413,132 +3211,1311 @@ failed:
return -EINVAL;
}
-static int psp_hw_fini(void *handle)
+static int psp_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
- return 0;
-
- amdgpu_ucode_fini_bo(adev);
-
- psp_ring_destroy(psp, PSP_RING_TYPE__KM);
+ if (psp->ta_fw) {
+ psp_ras_terminate(psp);
+ psp_securedisplay_terminate(psp);
+ psp_rap_terminate(psp);
+ psp_dtm_terminate(psp);
+ psp_hdcp_terminate(psp);
- if (psp->tmr_buf)
- amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
+ if (adev->gmc.xgmi.num_physical_nodes > 1)
+ psp_xgmi_terminate(psp);
+ }
- if (psp->fw_pri_buf)
- amdgpu_bo_free_kernel(&psp->fw_pri_bo,
- &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
+ psp_asd_terminate(psp);
+ psp_tmr_terminate(psp);
- if (psp->fence_buf_bo)
- amdgpu_bo_free_kernel(&psp->fence_buf_bo,
- &psp->fence_buf_mc_addr, &psp->fence_buf);
+ psp_ring_destroy(psp, PSP_RING_TYPE__KM);
return 0;
}
-static int psp_suspend(void *handle)
+static int psp_suspend(struct amdgpu_ip_block *ip_block)
{
- return 0;
+ int ret = 0;
+ struct amdgpu_device *adev = ip_block->adev;
+ struct psp_context *psp = &adev->psp;
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+ psp->xgmi_context.context.initialized) {
+ ret = psp_xgmi_terminate(psp);
+ if (ret) {
+ dev_err(adev->dev, "Failed to terminate xgmi ta\n");
+ goto out;
+ }
+ }
+
+ if (psp->ta_fw) {
+ ret = psp_ras_terminate(psp);
+ if (ret) {
+ dev_err(adev->dev, "Failed to terminate ras ta\n");
+ goto out;
+ }
+ ret = psp_hdcp_terminate(psp);
+ if (ret) {
+ dev_err(adev->dev, "Failed to terminate hdcp ta\n");
+ goto out;
+ }
+ ret = psp_dtm_terminate(psp);
+ if (ret) {
+ dev_err(adev->dev, "Failed to terminate dtm ta\n");
+ goto out;
+ }
+ ret = psp_rap_terminate(psp);
+ if (ret) {
+ dev_err(adev->dev, "Failed to terminate rap ta\n");
+ goto out;
+ }
+ ret = psp_securedisplay_terminate(psp);
+ if (ret) {
+ dev_err(adev->dev, "Failed to terminate securedisplay ta\n");
+ goto out;
+ }
+ }
+
+ ret = psp_asd_terminate(psp);
+ if (ret) {
+ dev_err(adev->dev, "Failed to terminate asd\n");
+ goto out;
+ }
+
+ ret = psp_tmr_terminate(psp);
+ if (ret) {
+ dev_err(adev->dev, "Failed to terminate tmr\n");
+ goto out;
+ }
+
+ ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
+ if (ret)
+ dev_err(adev->dev, "PSP ring stop failed\n");
+
+out:
+ return ret;
}
-static int psp_resume(void *handle)
+static int psp_resume(struct amdgpu_ip_block *ip_block)
{
int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
struct psp_context *psp = &adev->psp;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
- return 0;
+ dev_info(adev->dev, "PSP is resuming...\n");
- DRM_INFO("PSP is resuming...\n");
+ if (psp->mem_train_ctx.enable_mem_training) {
+ ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME);
+ if (ret) {
+ dev_err(adev->dev, "Failed to process memory training!\n");
+ return ret;
+ }
+ }
mutex_lock(&adev->firmware.mutex);
+ ret = amdgpu_ucode_init_bo(adev);
+ if (ret)
+ goto failed;
+
ret = psp_hw_start(psp);
if (ret)
goto failed;
- ret = psp_np_fw_load(psp);
+ ret = psp_load_non_psp_fw(psp);
if (ret)
goto failed;
+ ret = psp_asd_initialize(psp);
+ if (ret) {
+ dev_err(adev->dev, "PSP load asd failed!\n");
+ goto failed;
+ }
+
+ ret = psp_rl_load(adev);
+ if (ret) {
+ dev_err(adev->dev, "PSP load RL failed!\n");
+ goto failed;
+ }
+
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ ret = psp_xgmi_initialize(psp, false, true);
+ /* Warn on XGMI session initialization failure
+ * instead of stopping driver initialization
+ */
+ if (ret)
+ dev_err(psp->adev->dev,
+ "XGMI: Failed to initialize XGMI session\n");
+ }
+
+ if (psp->ta_fw) {
+ ret = psp_ras_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "RAS: Failed to initialize RAS\n");
+
+ ret = psp_hdcp_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "HDCP: Failed to initialize HDCP\n");
+
+ ret = psp_dtm_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "DTM: Failed to initialize DTM\n");
+
+ ret = psp_rap_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "RAP: Failed to initialize RAP\n");
+
+ ret = psp_securedisplay_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
+ }
+
mutex_unlock(&adev->firmware.mutex);
return 0;
failed:
- DRM_ERROR("PSP resume failed\n");
+ dev_err(adev->dev, "PSP resume failed\n");
mutex_unlock(&adev->firmware.mutex);
return ret;
}
-static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
- enum AMDGPU_UCODE_ID ucode_type)
+int psp_gpu_reset(struct amdgpu_device *adev)
{
- struct amdgpu_firmware_info *ucode = NULL;
+ int ret;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- DRM_INFO("firmware is not loaded by PSP\n");
- return true;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ return 0;
+
+ mutex_lock(&adev->psp.mutex);
+ ret = psp_mode1_reset(&adev->psp);
+ mutex_unlock(&adev->psp.mutex);
+
+ return ret;
+}
+
+int psp_rlc_autoload_start(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
+
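+/*
+ * Queue one command on the KM (GPCOM) ring: pick the next ring buffer
+ * frame from the current write pointer, validate it against the ring
+ * bounds, fill in the command buffer address, fence address and fence
+ * value, flush HDP and advance the write pointer in DWORDs.
+ */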
+int psp_ring_cmd_submit(struct psp_context *psp,
+ uint64_t cmd_buf_mc_addr,
+ uint64_t fence_mc_addr,
+ int index)
+{
+ unsigned int psp_write_ptr_reg = 0;
+ struct psp_gfx_rb_frame *write_frame;
+ struct psp_ring *ring = &psp->km_ring;
+ struct psp_gfx_rb_frame *ring_buffer_start = ring->ring_mem;
+ struct psp_gfx_rb_frame *ring_buffer_end = ring_buffer_start +
+ ring->ring_size / sizeof(struct psp_gfx_rb_frame) - 1;
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t ring_size_dw = ring->ring_size / 4;
+ uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
+
+ /* KM (GPCOM) prepare write pointer */
+ psp_write_ptr_reg = psp_ring_get_wptr(psp);
+
+ /* Update KM RB frame pointer to new frame */
+ /* write_frame ptr increments by size of rb_frame in bytes */
+ /* psp_write_ptr_reg increments by size of rb_frame in DWORDs */
+ if ((psp_write_ptr_reg % ring_size_dw) == 0)
+ write_frame = ring_buffer_start;
+ else
+ write_frame = ring_buffer_start + (psp_write_ptr_reg / rb_frame_size_dw);
+ /* Check invalid write_frame ptr address */
+ if ((write_frame < ring_buffer_start) || (ring_buffer_end < write_frame)) {
+ dev_err(adev->dev,
+ "ring_buffer_start = %p; ring_buffer_end = %p; write_frame = %p\n",
+ ring_buffer_start, ring_buffer_end, write_frame);
+ dev_err(adev->dev,
+ "write_frame is pointing to address out of bounds\n");
+ return -EINVAL;
}
- if (!adev->firmware.fw_size)
- return false;
+ /* Initialize KM RB frame */
+ memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
- ucode = &adev->firmware.ucode[ucode_type];
- if (!ucode->fw || !ucode->ucode_size)
- return false;
+ /* Update KM RB frame */
+ write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
+ write_frame->cmd_buf_addr_lo = lower_32_bits(cmd_buf_mc_addr);
+ write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr);
+ write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr);
+ write_frame->fence_value = index;
+ amdgpu_device_flush_hdp(adev, NULL);
- return psp_compare_sram_data(&adev->psp, ucode, ucode_type);
+ /* Update the write Pointer in DWORDs */
+ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
+ psp_ring_set_wptr(psp, psp_write_ptr_reg);
+ return 0;
}
-static int psp_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
+int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
{
+ struct amdgpu_device *adev = psp->adev;
+ const struct psp_firmware_header_v1_0 *asd_hdr;
+ int err = 0;
+
+ err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_asd.bin", chip_name);
+ if (err)
+ goto out;
+
+ asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+ adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+ adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
+ adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+ adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
+ le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
return 0;
+out:
+ amdgpu_ucode_release(&adev->psp.asd_fw);
+ return err;
}
-static int psp_set_powergating_state(void *handle,
+int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
+{
+ struct amdgpu_device *adev = psp->adev;
+ const struct psp_firmware_header_v1_0 *toc_hdr;
+ int err = 0;
+
+ err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_toc.bin", chip_name);
+ if (err)
+ goto out;
+
+ toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
+ adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
+ adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
+ adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
+ adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
+ le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
+ return 0;
+out:
+ amdgpu_ucode_release(&adev->psp.toc_fw);
+ return err;
+}
+
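+/*
+ * Copy the version, size and start address of one packed PSP binary
+ * (sos, sys_drv, kdb, toc, spl, rl or one of the PSP drivers) from a
+ * v2 SOS header descriptor into the psp context.
+ */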
+static int parse_sos_bin_descriptor(struct psp_context *psp,
+ const struct psp_fw_bin_desc *desc,
+ const struct psp_firmware_header_v2_0 *sos_hdr)
+{
+ uint8_t *ucode_start_addr = NULL;
+
+ if (!psp || !desc || !sos_hdr)
+ return -EINVAL;
+
+ ucode_start_addr = (uint8_t *)sos_hdr +
+ le32_to_cpu(desc->offset_bytes) +
+ le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
+
+ switch (desc->fw_type) {
+ case PSP_FW_TYPE_PSP_SOS:
+ psp->sos.fw_version = le32_to_cpu(desc->fw_version);
+ psp->sos.feature_version = le32_to_cpu(desc->fw_version);
+ psp->sos.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->sos.start_addr = ucode_start_addr;
+ break;
+ case PSP_FW_TYPE_PSP_SYS_DRV:
+ psp->sys.fw_version = le32_to_cpu(desc->fw_version);
+ psp->sys.feature_version = le32_to_cpu(desc->fw_version);
+ psp->sys.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->sys.start_addr = ucode_start_addr;
+ break;
+ case PSP_FW_TYPE_PSP_KDB:
+ psp->kdb.fw_version = le32_to_cpu(desc->fw_version);
+ psp->kdb.feature_version = le32_to_cpu(desc->fw_version);
+ psp->kdb.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->kdb.start_addr = ucode_start_addr;
+ break;
+ case PSP_FW_TYPE_PSP_TOC:
+ psp->toc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->toc.feature_version = le32_to_cpu(desc->fw_version);
+ psp->toc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->toc.start_addr = ucode_start_addr;
+ break;
+ case PSP_FW_TYPE_PSP_SPL:
+ psp->spl.fw_version = le32_to_cpu(desc->fw_version);
+ psp->spl.feature_version = le32_to_cpu(desc->fw_version);
+ psp->spl.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->spl.start_addr = ucode_start_addr;
+ break;
+ case PSP_FW_TYPE_PSP_RL:
+ psp->rl.fw_version = le32_to_cpu(desc->fw_version);
+ psp->rl.feature_version = le32_to_cpu(desc->fw_version);
+ psp->rl.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->rl.start_addr = ucode_start_addr;
+ break;
+ case PSP_FW_TYPE_PSP_SOC_DRV:
+ psp->soc_drv.fw_version = le32_to_cpu(desc->fw_version);
+ psp->soc_drv.feature_version = le32_to_cpu(desc->fw_version);
+ psp->soc_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->soc_drv.start_addr = ucode_start_addr;
+ break;
+ case PSP_FW_TYPE_PSP_INTF_DRV:
+ psp->intf_drv.fw_version = le32_to_cpu(desc->fw_version);
+ psp->intf_drv.feature_version = le32_to_cpu(desc->fw_version);
+ psp->intf_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->intf_drv.start_addr = ucode_start_addr;
+ break;
+ case PSP_FW_TYPE_PSP_DBG_DRV:
+ psp->dbg_drv.fw_version = le32_to_cpu(desc->fw_version);
+ psp->dbg_drv.feature_version = le32_to_cpu(desc->fw_version);
+ psp->dbg_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->dbg_drv.start_addr = ucode_start_addr;
+ break;
+ case PSP_FW_TYPE_PSP_RAS_DRV:
+ psp->ras_drv.fw_version = le32_to_cpu(desc->fw_version);
+ psp->ras_drv.feature_version = le32_to_cpu(desc->fw_version);
+ psp->ras_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->ras_drv.start_addr = ucode_start_addr;
+ break;
+ case PSP_FW_TYPE_PSP_IPKEYMGR_DRV:
+ psp->ipkeymgr_drv.fw_version = le32_to_cpu(desc->fw_version);
+ psp->ipkeymgr_drv.feature_version = le32_to_cpu(desc->fw_version);
+ psp->ipkeymgr_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->ipkeymgr_drv.start_addr = ucode_start_addr;
+ break;
+ case PSP_FW_TYPE_PSP_SPDM_DRV:
+ psp->spdm_drv.fw_version = le32_to_cpu(desc->fw_version);
+ psp->spdm_drv.feature_version = le32_to_cpu(desc->fw_version);
+ psp->spdm_drv.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->spdm_drv.start_addr = ucode_start_addr;
+ break;
+ default:
+ dev_warn(psp->adev->dev, "Unsupported PSP FW type: %d\n", desc->fw_type);
+ break;
+ }
+
+ return 0;
+}
+
+static int psp_init_sos_base_fw(struct amdgpu_device *adev)
+{
+ const struct psp_firmware_header_v1_0 *sos_hdr;
+ const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
+ uint8_t *ucode_array_start_addr;
+
+ sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+ ucode_array_start_addr = (uint8_t *)sos_hdr +
+ le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
+
+ if (adev->gmc.xgmi.connected_to_cpu ||
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2))) {
+ adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
+ adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
+
+ adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr->sos.offset_bytes);
+ adev->psp.sys.start_addr = ucode_array_start_addr;
+
+ adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
+ adev->psp.sos.start_addr = ucode_array_start_addr +
+ le32_to_cpu(sos_hdr->sos.offset_bytes);
+ } else {
+ /* Load alternate PSP SOS FW */
+ sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
+
+ adev->psp.sos.fw_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
+ adev->psp.sos.feature_version = le32_to_cpu(sos_hdr_v1_3->sos_aux.fw_version);
+
+ adev->psp.sys.size_bytes = le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.size_bytes);
+ adev->psp.sys.start_addr = ucode_array_start_addr +
+ le32_to_cpu(sos_hdr_v1_3->sys_drv_aux.offset_bytes);
+
+ adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
+ adev->psp.sos.start_addr = ucode_array_start_addr +
+ le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
+ }
+
+ if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
+ dev_warn(adev->dev, "PSP SOS FW not available");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
+{
+ struct amdgpu_device *adev = psp->adev;
+ const struct psp_firmware_header_v1_0 *sos_hdr;
+ const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
+ const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
+ const struct psp_firmware_header_v1_3 *sos_hdr_v1_3;
+ const struct psp_firmware_header_v2_0 *sos_hdr_v2_0;
+ const struct psp_firmware_header_v2_1 *sos_hdr_v2_1;
+ int fw_index, fw_bin_count, start_index = 0;
+ const struct psp_fw_bin_desc *fw_bin;
+ uint8_t *ucode_array_start_addr;
+ int err = 0;
+
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos_kicker.bin", chip_name);
+ else
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sos.bin", chip_name);
+ if (err)
+ goto out;
+
+ sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+ ucode_array_start_addr = (uint8_t *)sos_hdr +
+ le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
+ amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
+
+ switch (sos_hdr->header.header_version_major) {
+ case 1:
+ err = psp_init_sos_base_fw(adev);
+ if (err)
+ goto out;
+
+ if (sos_hdr->header.header_version_minor == 1) {
+ sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
+ adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_1->toc.size_bytes);
+ adev->psp.toc.start_addr = (uint8_t *)adev->psp.sys.start_addr +
+ le32_to_cpu(sos_hdr_v1_1->toc.offset_bytes);
+ adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_1->kdb.size_bytes);
+ adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
+ le32_to_cpu(sos_hdr_v1_1->kdb.offset_bytes);
+ }
+ if (sos_hdr->header.header_version_minor == 2) {
+ sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
+ adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_2->kdb.size_bytes);
+ adev->psp.kdb.start_addr = (uint8_t *)adev->psp.sys.start_addr +
+ le32_to_cpu(sos_hdr_v1_2->kdb.offset_bytes);
+ }
+ if (sos_hdr->header.header_version_minor == 3) {
+ sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
+ adev->psp.toc.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.toc.size_bytes);
+ adev->psp.toc.start_addr = ucode_array_start_addr +
+ le32_to_cpu(sos_hdr_v1_3->v1_1.toc.offset_bytes);
+ adev->psp.kdb.size_bytes = le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.size_bytes);
+ adev->psp.kdb.start_addr = ucode_array_start_addr +
+ le32_to_cpu(sos_hdr_v1_3->v1_1.kdb.offset_bytes);
+ adev->psp.spl.size_bytes = le32_to_cpu(sos_hdr_v1_3->spl.size_bytes);
+ adev->psp.spl.start_addr = ucode_array_start_addr +
+ le32_to_cpu(sos_hdr_v1_3->spl.offset_bytes);
+ adev->psp.rl.size_bytes = le32_to_cpu(sos_hdr_v1_3->rl.size_bytes);
+ adev->psp.rl.start_addr = ucode_array_start_addr +
+ le32_to_cpu(sos_hdr_v1_3->rl.offset_bytes);
+ }
+ break;
+ case 2:
+ sos_hdr_v2_0 = (const struct psp_firmware_header_v2_0 *)adev->psp.sos_fw->data;
+
+ fw_bin_count = le32_to_cpu(sos_hdr_v2_0->psp_fw_bin_count);
+
+ if (fw_bin_count >= UCODE_MAX_PSP_PACKAGING) {
+ dev_err(adev->dev, "packed SOS count exceeds maximum limit\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (sos_hdr_v2_0->header.header_version_minor == 1) {
+ sos_hdr_v2_1 = (const struct psp_firmware_header_v2_1 *)adev->psp.sos_fw->data;
+
+ fw_bin = sos_hdr_v2_1->psp_fw_bin;
+
+ if (psp_is_aux_sos_load_required(psp))
+ start_index = le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
+ else
+ fw_bin_count -= le32_to_cpu(sos_hdr_v2_1->psp_aux_fw_bin_index);
+
+ } else {
+ fw_bin = sos_hdr_v2_0->psp_fw_bin;
+ }
+
+ for (fw_index = start_index; fw_index < fw_bin_count; fw_index++) {
+ err = parse_sos_bin_descriptor(psp, fw_bin + fw_index,
+ sos_hdr_v2_0);
+ if (err)
+ goto out;
+ }
+ break;
+ default:
+ dev_err(adev->dev,
+ "unsupported psp sos firmware\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ return 0;
+out:
+ amdgpu_ucode_release(&adev->psp.sos_fw);
+
+ return err;
+}
+
+static bool is_ta_fw_applicable(struct psp_context *psp,
+ const struct psp_fw_bin_desc *desc)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t fw_version;
+
+ switch (desc->fw_type) {
+ case TA_FW_TYPE_PSP_XGMI:
+ case TA_FW_TYPE_PSP_XGMI_AUX:
+ /* for now, AUX TA only exists on 13.0.6 ta bin,
+ * from v20.00.0x.14
+ */
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
+ IP_VERSION(13, 0, 6)) {
+ fw_version = le32_to_cpu(desc->fw_version);
+
+ if (adev->flags & AMD_IS_APU &&
+ (fw_version & 0xff) >= 0x14)
+ return desc->fw_type == TA_FW_TYPE_PSP_XGMI_AUX;
+ else
+ return desc->fw_type == TA_FW_TYPE_PSP_XGMI;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static int parse_ta_bin_descriptor(struct psp_context *psp,
+ const struct psp_fw_bin_desc *desc,
+ const struct ta_firmware_header_v2_0 *ta_hdr)
+{
+ uint8_t *ucode_start_addr = NULL;
+
+ if (!psp || !desc || !ta_hdr)
+ return -EINVAL;
+
+ if (!is_ta_fw_applicable(psp, desc))
+ return 0;
+
+ ucode_start_addr = (uint8_t *)ta_hdr +
+ le32_to_cpu(desc->offset_bytes) +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ switch (desc->fw_type) {
+ case TA_FW_TYPE_PSP_ASD:
+ psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
+ psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->asd_context.bin_desc.start_addr = ucode_start_addr;
+ break;
+ case TA_FW_TYPE_PSP_XGMI:
+ case TA_FW_TYPE_PSP_XGMI_AUX:
+ psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
+ break;
+ case TA_FW_TYPE_PSP_RAS:
+ psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
+ break;
+ case TA_FW_TYPE_PSP_HDCP:
+ psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
+ break;
+ case TA_FW_TYPE_PSP_DTM:
+ psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
+ break;
+ case TA_FW_TYPE_PSP_RAP:
+ psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
+ break;
+ case TA_FW_TYPE_PSP_SECUREDISPLAY:
+ psp->securedisplay_context.context.bin_desc.fw_version =
+ le32_to_cpu(desc->fw_version);
+ psp->securedisplay_context.context.bin_desc.size_bytes =
+ le32_to_cpu(desc->size_bytes);
+ psp->securedisplay_context.context.bin_desc.start_addr =
+ ucode_start_addr;
+ break;
+ default:
+ dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
+ break;
+ }
+
+ return 0;
+}
+
+static int parse_ta_v1_microcode(struct psp_context *psp)
+{
+ const struct ta_firmware_header_v1_0 *ta_hdr;
+ struct amdgpu_device *adev = psp->adev;
+
+ ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
+
+ if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
+ return -EINVAL;
+
+ adev->psp.xgmi_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->xgmi.fw_version);
+ adev->psp.xgmi_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->xgmi.size_bytes);
+ adev->psp.xgmi_context.context.bin_desc.start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ras_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->ras.fw_version);
+ adev->psp.ras_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->ras.size_bytes);
+ adev->psp.ras_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->ras.offset_bytes);
+
+ adev->psp.hdcp_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->hdcp.fw_version);
+ adev->psp.hdcp_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->hdcp.size_bytes);
+ adev->psp.hdcp_context.context.bin_desc.start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.dtm_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->dtm.fw_version);
+ adev->psp.dtm_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->dtm.size_bytes);
+ adev->psp.dtm_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->dtm.offset_bytes);
+
+ adev->psp.securedisplay_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->securedisplay.fw_version);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->securedisplay.size_bytes);
+ adev->psp.securedisplay_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
+
+ return 0;
+}
+
+static int parse_ta_v2_microcode(struct psp_context *psp)
+{
+ const struct ta_firmware_header_v2_0 *ta_hdr;
+ struct amdgpu_device *adev = psp->adev;
+ int err = 0;
+ int ta_index = 0;
+
+ ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
+
+ if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
+ return -EINVAL;
+
+ if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
+ dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
+ return -EINVAL;
+ }
+
+ for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
+ err = parse_ta_bin_descriptor(psp,
+ &ta_hdr->ta_fw_bin[ta_index],
+ ta_hdr);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
+{
+ const struct common_firmware_header *hdr;
+ struct amdgpu_device *adev = psp->adev;
+ int err;
+
+ if (amdgpu_is_kicker_fw(adev))
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta_kicker.bin", chip_name);
+ else
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_ta.bin", chip_name);
+ if (err)
+ return err;
+
+ hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
+ switch (le16_to_cpu(hdr->header_version_major)) {
+ case 1:
+ err = parse_ta_v1_microcode(psp);
+ break;
+ case 2:
+ err = parse_ta_v2_microcode(psp);
+ break;
+ default:
+ dev_err(adev->dev, "unsupported TA header version\n");
+ err = -EINVAL;
+ }
+
+ if (err)
+ amdgpu_ucode_release(&adev->psp.ta_fw);
+
+ return err;
+}
+
+int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
+{
+ struct amdgpu_device *adev = psp->adev;
+ const struct psp_firmware_header_v1_0 *cap_hdr_v1_0;
+ struct amdgpu_firmware_info *info = NULL;
+ int err = 0;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
+ return -EINVAL;
+ }
+
+ err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, AMDGPU_UCODE_OPTIONAL,
+ "amdgpu/%s_cap.bin", chip_name);
+ if (err) {
+ if (err == -ENODEV) {
+ dev_warn(adev->dev, "cap microcode does not exist, skip\n");
+ err = 0;
+ } else {
+ dev_err(adev->dev, "fail to initialize cap microcode\n");
+ }
+ goto out;
+ }
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
+ info->ucode_id = AMDGPU_UCODE_ID_CAP;
+ info->fw = adev->psp.cap_fw;
+ cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *)
+ adev->psp.cap_fw->data;
+ adev->firmware.fw_size += ALIGN(
+ le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE);
+ adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version);
+ adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version);
+ adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes);
+
+ return 0;
+
+out:
+ amdgpu_ucode_release(&adev->psp.cap_fw);
+ return err;
+}
+
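+/*
+ * Ask the PSP to apply SQ perfmon overrides for one XCP via the
+ * GFX_CMD_ID_CONFIG_SQ_PERFMON command; only supported on MP0 13.0.6
+ * and skipped entirely under SR-IOV.
+ */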
+int psp_config_sq_perfmon(struct psp_context *psp,
+ uint32_t xcp_id, bool core_override_enable,
+ bool reg_override_enable, bool perfmon_override_enable)
+{
+ int ret;
+
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (xcp_id > MAX_XCP) {
+ dev_err(psp->adev->dev, "invalid xcp_id %d\n", xcp_id);
+ return -EINVAL;
+ }
+
+ if (amdgpu_ip_version(psp->adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6)) {
+ dev_err(psp->adev->dev, "Unsupported MP0 version 0x%x for CONFIG_SQ_PERFMON command\n",
+ amdgpu_ip_version(psp->adev, MP0_HWIP, 0));
+ return -EINVAL;
+ }
+ struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
+
+ cmd->cmd_id = GFX_CMD_ID_CONFIG_SQ_PERFMON;
+ cmd->cmd.config_sq_perfmon.gfx_xcp_mask = BIT_MASK(xcp_id);
+ cmd->cmd.config_sq_perfmon.core_override = core_override_enable;
+ cmd->cmd.config_sq_perfmon.reg_override = reg_override_enable;
+ cmd->cmd.config_sq_perfmon.perfmon_override = perfmon_override_enable;
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+ if (ret)
+ dev_warn(psp->adev->dev, "PSP failed to config sq: xcp%d core%d reg%d perfmon%d\n",
+ xcp_id, core_override_enable, reg_override_enable, perfmon_override_enable);
+
+ release_psp_cmd_buf(psp);
+ return ret;
+}
+
+static int psp_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int psp_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
}
+static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct amdgpu_ip_block *ip_block;
+ uint32_t fw_ver;
+ int ret;
+
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
+ if (!ip_block || !ip_block->status.late_initialized) {
+		dev_info(adev->dev, "PSP block is not ready yet.\n");
+ return -EBUSY;
+ }
+
+ mutex_lock(&adev->psp.mutex);
+ ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
+ mutex_unlock(&adev->psp.mutex);
+
+ if (ret) {
+ dev_err(adev->dev, "Failed to read USBC PD FW, err = %d\n", ret);
+ return ret;
+ }
+
+ return sysfs_emit(buf, "%x\n", fw_ver);
+}
+
+static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ int ret, idx;
+ const struct firmware *usbc_pd_fw;
+ struct amdgpu_bo *fw_buf_bo = NULL;
+ uint64_t fw_pri_mc_addr;
+ void *fw_pri_cpu_addr;
+ struct amdgpu_ip_block *ip_block;
+
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP);
+ if (!ip_block || !ip_block->status.late_initialized) {
+		dev_err(adev->dev, "PSP block is not ready yet.\n");
+ return -EBUSY;
+ }
+
+ if (!drm_dev_enter(ddev, &idx))
+ return -ENODEV;
+
+ ret = amdgpu_ucode_request(adev, &usbc_pd_fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s", buf);
+ if (ret)
+ goto fail;
+
+ /* LFB address which is aligned to 1MB boundary per PSP request */
+ ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &fw_buf_bo, &fw_pri_mc_addr,
+ &fw_pri_cpu_addr);
+ if (ret)
+ goto rel_buf;
+
+ memcpy_toio(fw_pri_cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
+
+ mutex_lock(&adev->psp.mutex);
+ ret = psp_load_usbc_pd_fw(&adev->psp, fw_pri_mc_addr);
+ mutex_unlock(&adev->psp.mutex);
+
+ amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
+
+rel_buf:
+ amdgpu_ucode_release(&usbc_pd_fw);
+fail:
+ if (ret) {
+		dev_err(adev->dev, "Failed to load USBC PD FW, err = %d\n", ret);
+ count = ret;
+ }
+
+ drm_dev_exit(idx);
+ return count;
+}
+
+void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size)
+{
+ int idx;
+
+ if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
+ return;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, start_addr, bin_size);
+
+ drm_dev_exit(idx);
+}
+
+/**
+ * DOC: usbc_pd_fw
+ * Reading from this file will retrieve the USB-C PD firmware version. Writing to
+ * this file will trigger the update process.
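+ *
+ * Illustrative usage (the sysfs path below is only an example; the attribute
+ * is created on the GPU's device directory):
+ *
+ * cat /sys/class/drm/card0/device/usbc_pd_fw
+ * echo -n <pd_fw_name>.bin > /sys/class/drm/card0/device/usbc_pd_fw
+ *
+ * The written name is requested from the firmware loader as
+ * "amdgpu/<pd_fw_name>.bin".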
+ */
+static DEVICE_ATTR(usbc_pd_fw, 0644,
+ psp_usbc_pd_fw_sysfs_read,
+ psp_usbc_pd_fw_sysfs_write);
+
+int is_psp_fw_valid(struct psp_bin_desc bin)
+{
+ return bin.size_bytes;
+}
+
+static ssize_t amdgpu_psp_vbflash_write(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *bin_attr,
+ char *buffer, loff_t pos, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ adev->psp.vbflash_done = false;
+
+ /* Safeguard against memory drain */
+ if (adev->psp.vbflash_image_size > AMD_VBIOS_FILE_MAX_SIZE_B) {
+ dev_err(adev->dev, "File size cannot exceed %u\n", AMD_VBIOS_FILE_MAX_SIZE_B);
+ kvfree(adev->psp.vbflash_tmp_buf);
+ adev->psp.vbflash_tmp_buf = NULL;
+ adev->psp.vbflash_image_size = 0;
+ return -ENOMEM;
+ }
+
+ /* TODO Just allocate max for now and optimize to realloc later if needed */
+ if (!adev->psp.vbflash_tmp_buf) {
+ adev->psp.vbflash_tmp_buf = kvmalloc(AMD_VBIOS_FILE_MAX_SIZE_B, GFP_KERNEL);
+ if (!adev->psp.vbflash_tmp_buf)
+ return -ENOMEM;
+ }
+
+ mutex_lock(&adev->psp.mutex);
+ memcpy(adev->psp.vbflash_tmp_buf + pos, buffer, count);
+ adev->psp.vbflash_image_size += count;
+ mutex_unlock(&adev->psp.mutex);
+
+ dev_dbg(adev->dev, "IFWI staged for update\n");
+
+ return count;
+}
+
+static ssize_t amdgpu_psp_vbflash_read(struct file *filp, struct kobject *kobj,
+ const struct bin_attribute *bin_attr, char *buffer,
+ loff_t pos, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct amdgpu_bo *fw_buf_bo = NULL;
+ uint64_t fw_pri_mc_addr;
+ void *fw_pri_cpu_addr;
+ int ret;
+
+ if (adev->psp.vbflash_image_size == 0)
+ return -EINVAL;
+
+ dev_dbg(adev->dev, "PSP IFWI flash process initiated\n");
+
+ ret = amdgpu_bo_create_kernel(adev, adev->psp.vbflash_image_size,
+ AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &fw_buf_bo,
+ &fw_pri_mc_addr,
+ &fw_pri_cpu_addr);
+ if (ret)
+ goto rel_buf;
+
+ memcpy_toio(fw_pri_cpu_addr, adev->psp.vbflash_tmp_buf, adev->psp.vbflash_image_size);
+
+ mutex_lock(&adev->psp.mutex);
+ ret = psp_update_spirom(&adev->psp, fw_pri_mc_addr);
+ mutex_unlock(&adev->psp.mutex);
+
+ amdgpu_bo_free_kernel(&fw_buf_bo, &fw_pri_mc_addr, &fw_pri_cpu_addr);
+
+rel_buf:
+ kvfree(adev->psp.vbflash_tmp_buf);
+ adev->psp.vbflash_tmp_buf = NULL;
+ adev->psp.vbflash_image_size = 0;
+
+ if (ret) {
+ dev_err(adev->dev, "Failed to load IFWI, err = %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(adev->dev, "PSP IFWI flash process done\n");
+ return 0;
+}
+
+/**
+ * DOC: psp_vbflash
+ * Writing to this file will stage an IFWI for update. Reading from this file
+ * will trigger the update process.
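+ *
+ * Illustrative usage (the sysfs path below is only an example):
+ *
+ * cat <ifwi_image>.bin > /sys/class/drm/card0/device/psp_vbflash
+ * cat /sys/class/drm/card0/device/psp_vbflash
+ *
+ * The first command stages the image and the second triggers the flash;
+ * completion can then be checked via psp_vbflash_status.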
+ */
+static const struct bin_attribute psp_vbflash_bin_attr = {
+ .attr = {.name = "psp_vbflash", .mode = 0660},
+ .size = 0,
+ .write = amdgpu_psp_vbflash_write,
+ .read = amdgpu_psp_vbflash_read,
+};
+
+/**
+ * DOC: psp_vbflash_status
+ * The status of the flash process.
+ * 0: IFWI flash not complete.
+ * 1: IFWI flash complete.
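+ *
+ * For example (path is illustrative):
+ *
+ * cat /sys/class/drm/card0/device/psp_vbflash_status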
+ */
+static ssize_t amdgpu_psp_vbflash_status(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ uint32_t vbflash_status;
+
+ vbflash_status = psp_vbflash_status(&adev->psp);
+ if (!adev->psp.vbflash_done)
+ vbflash_status = 0;
+ else if (adev->psp.vbflash_done && !(vbflash_status & 0x80000000))
+ vbflash_status = 1;
+
+ return sysfs_emit(buf, "0x%x\n", vbflash_status);
+}
+static DEVICE_ATTR(psp_vbflash_status, 0440, amdgpu_psp_vbflash_status, NULL);
+
+static const struct bin_attribute *const bin_flash_attrs[] = {
+ &psp_vbflash_bin_attr,
+ NULL
+};
+
+static struct attribute *flash_attrs[] = {
+ &dev_attr_psp_vbflash_status.attr,
+ &dev_attr_usbc_pd_fw.attr,
+ NULL
+};
+
+static umode_t amdgpu_flash_attr_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (attr == &dev_attr_usbc_pd_fw.attr)
+ return adev->psp.sup_pd_fw_up ? 0660 : 0;
+
+ return adev->psp.sup_ifwi_up ? 0440 : 0;
+}
+
+static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj,
+ const struct bin_attribute *attr,
+ int idx)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ return adev->psp.sup_ifwi_up ? 0660 : 0;
+}
+
+const struct attribute_group amdgpu_flash_attr_group = {
+ .attrs = flash_attrs,
+ .bin_attrs = bin_flash_attrs,
+ .is_bin_visible = amdgpu_bin_flash_attr_is_visible,
+ .is_visible = amdgpu_flash_attr_is_visible,
+};
+
+#if defined(CONFIG_DEBUG_FS)
+static int psp_read_spirom_debugfs_open(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet;
+ int ret;
+
+	/* serialize open() calls on this file */
+ if (!mutex_trylock(&adev->psp.mutex))
+ return -EBUSY;
+
+	/*
+	 * Make sure only one userspace process is dumping at a time so that
+	 * only one memory buffer of AMD_VBIOS_FILE_MAX_SIZE_B * 2 is consumed,
+	 * e.g. when one process tries to open the file while another has
+	 * already proceeded to read or release. This also removes the need
+	 * for a mutex in the read() and release() callbacks.
+	 */
+ if (adev->psp.spirom_dump_trip) {
+ mutex_unlock(&adev->psp.mutex);
+ return -EBUSY;
+ }
+
+ bo_triplet = kzalloc(sizeof(struct spirom_bo), GFP_KERNEL);
+ if (!bo_triplet) {
+ mutex_unlock(&adev->psp.mutex);
+ return -ENOMEM;
+ }
+
+ ret = amdgpu_bo_create_kernel(adev, AMD_VBIOS_FILE_MAX_SIZE_B * 2,
+ AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &bo_triplet->bo,
+ &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+ if (ret)
+ goto rel_trip;
+
+ ret = psp_dump_spirom(&adev->psp, bo_triplet->mc_addr);
+ if (ret)
+ goto rel_bo;
+
+ adev->psp.spirom_dump_trip = bo_triplet;
+ mutex_unlock(&adev->psp.mutex);
+ return 0;
+rel_bo:
+ amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+rel_trip:
+ kfree(bo_triplet);
+ mutex_unlock(&adev->psp.mutex);
+	dev_err(adev->dev, "IFWI dump failed, err = %d\n", ret);
+ return ret;
+}
+
+static ssize_t psp_read_spirom_debugfs_read(struct file *filp, char __user *buf, size_t size,
+ loff_t *pos)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+ if (!bo_triplet)
+ return -EINVAL;
+
+ return simple_read_from_buffer(buf,
+ size,
+ pos, bo_triplet->cpu_addr,
+ AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+}
+
+static int psp_read_spirom_debugfs_release(struct inode *inode, struct file *filp)
+{
+ struct amdgpu_device *adev = filp->f_inode->i_private;
+ struct spirom_bo *bo_triplet = adev->psp.spirom_dump_trip;
+
+ if (bo_triplet) {
+ amdgpu_bo_free_kernel(&bo_triplet->bo, &bo_triplet->mc_addr,
+ &bo_triplet->cpu_addr);
+ kfree(bo_triplet);
+ }
+
+ adev->psp.spirom_dump_trip = NULL;
+ return 0;
+}
+
+static const struct file_operations psp_dump_spirom_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = psp_read_spirom_debugfs_open,
+ .read = psp_read_spirom_debugfs_read,
+ .release = psp_read_spirom_debugfs_release,
+ .llseek = default_llseek,
+};
+#endif
+
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+
+ debugfs_create_file_size("psp_spirom_dump", 0444, minor->debugfs_root,
+ adev, &psp_dump_spirom_debugfs_ops, AMD_VBIOS_FILE_MAX_SIZE_B * 2);
+#endif
+}
+
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
- .late_init = NULL,
.sw_init = psp_sw_init,
.sw_fini = psp_sw_fini,
.hw_init = psp_hw_init,
.hw_fini = psp_hw_fini,
.suspend = psp_suspend,
.resume = psp_resume,
- .is_idle = NULL,
- .wait_for_idle = NULL,
- .soft_reset = NULL,
.set_clockgating_state = psp_set_clockgating_state,
.set_powergating_state = psp_set_powergating_state,
};
-static const struct amdgpu_psp_funcs psp_funcs = {
- .check_fw_loading_status = psp_check_fw_loading_status,
+const struct amdgpu_ip_block_version psp_v3_1_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_PSP,
+ .major = 3,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &psp_ip_funcs,
};
-static void psp_set_funcs(struct amdgpu_device *adev)
-{
- if (NULL == adev->firmware.funcs)
- adev->firmware.funcs = &psp_funcs;
-}
+const struct amdgpu_ip_block_version psp_v10_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_PSP,
+ .major = 10,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &psp_ip_funcs,
+};
-const struct amdgpu_ip_block_version psp_v3_1_ip_block =
-{
+const struct amdgpu_ip_block_version psp_v11_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_PSP,
- .major = 3,
- .minor = 1,
+ .major = 11,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &psp_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version psp_v11_0_8_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_PSP,
+ .major = 11,
+ .minor = 0,
+ .rev = 8,
+ .funcs = &psp_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version psp_v12_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_PSP,
+ .major = 12,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &psp_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_PSP,
+ .major = 13,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &psp_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version psp_v13_0_4_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_PSP,
+ .major = 13,
+ .minor = 0,
+ .rev = 4,
+ .funcs = &psp_ip_funcs,
+};
+
+const struct amdgpu_ip_block_version psp_v14_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_PSP,
+ .major = 14,
+ .minor = 0,
.rev = 0,
.funcs = &psp_ip_funcs,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 0301e4e0b297..237b624aa51c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -27,14 +27,86 @@
#include "amdgpu.h"
#include "psp_gfx_if.h"
+#include "ta_xgmi_if.h"
+#include "ta_ras_if.h"
+#include "ta_rap_if.h"
+#include "ta_secureDisplay_if.h"
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
-#define PSP_ASD_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
+#define PSP_TMR_SIZE(adev) ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
+#define PSP_TMR_ALIGNMENT 0x100000
+#define PSP_FW_NAME_LEN 0x24
-enum psp_ring_type
-{
+/* VBIOS gfl defines */
+#define MBOX_READY_MASK 0x80000000
+#define MBOX_STATUS_MASK 0x0000FFFF
+#define MBOX_COMMAND_MASK 0x00FF0000
+#define MBOX_READY_FLAG 0x80000000
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_LO 0x2
+#define C2PMSG_CMD_SPI_UPDATE_ROM_IMAGE_ADDR_HI 0x3
+#define C2PMSG_CMD_SPI_UPDATE_FLASH_IMAGE 0x4
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_LO 0xf
+#define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI 0x10
+#define C2PMSG_CMD_SPI_GET_FLASH_IMAGE 0x11
+
+/* Command register bit 31 set to indicate readiness */
+#define MBOX_TOS_READY_FLAG (GFX_FLAG_RESPONSE)
+#define MBOX_TOS_READY_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK)
+
+/* Values to check for a successful GFX_CMD response wait. Check against
+ * both status bits and response state - helps to detect a command failure
+ * or other unexpected cases like a device drop reading all 0xFFs
+ */
+#define MBOX_TOS_RESP_FLAG (GFX_FLAG_RESPONSE)
+#define MBOX_TOS_RESP_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK)
+
+extern const struct attribute_group amdgpu_flash_attr_group;
+
+enum psp_shared_mem_size {
+ PSP_ASD_SHARED_MEM_SIZE = 0x0,
+ PSP_XGMI_SHARED_MEM_SIZE = 0x4000,
+ PSP_RAS_SHARED_MEM_SIZE = 0x4000,
+ PSP_HDCP_SHARED_MEM_SIZE = 0x4000,
+ PSP_DTM_SHARED_MEM_SIZE = 0x4000,
+ PSP_RAP_SHARED_MEM_SIZE = 0x4000,
+ PSP_SECUREDISPLAY_SHARED_MEM_SIZE = 0x4000,
+};
+
+enum ta_type_id {
+ TA_TYPE_XGMI = 1,
+ TA_TYPE_RAS,
+ TA_TYPE_HDCP,
+ TA_TYPE_DTM,
+ TA_TYPE_RAP,
+ TA_TYPE_SECUREDISPLAY,
+
+ TA_TYPE_MAX_INDEX,
+};
+
+struct psp_context;
+struct psp_xgmi_node_info;
+struct psp_xgmi_topology_info;
+struct psp_bin_desc;
+
+enum psp_bootloader_cmd {
+ PSP_BL__LOAD_SYSDRV = 0x10000,
+ PSP_BL__LOAD_SOSDRV = 0x20000,
+ PSP_BL__LOAD_KEY_DATABASE = 0x80000,
+ PSP_BL__LOAD_SOCDRV = 0xB0000,
+ PSP_BL__LOAD_DBGDRV = 0xC0000,
+ PSP_BL__LOAD_HADDRV = PSP_BL__LOAD_DBGDRV,
+ PSP_BL__LOAD_INTFDRV = 0xD0000,
+ PSP_BL__LOAD_RASDRV = 0xE0000,
+ PSP_BL__LOAD_IPKEYMGRDRV = 0xF0000,
+ PSP_BL__DRAM_LONG_TRAIN = 0x100000,
+ PSP_BL__DRAM_SHORT_TRAIN = 0x200000,
+ PSP_BL__LOAD_TOS_SPL_TABLE = 0x10000000,
+ PSP_BL__LOAD_SPDMDRV = 0x20000000,
+};
+
+enum psp_ring_type {
PSP_RING_TYPE__INVALID = 0,
/*
* These values map to the way the PSP kernel identifies the
@@ -44,70 +116,337 @@ enum psp_ring_type
PSP_RING_TYPE__KM = 2 /* Kernel mode ring (formerly called GPCOM) */
};
-struct psp_ring
-{
+struct psp_ring {
enum psp_ring_type ring_type;
struct psp_gfx_rb_frame *ring_mem;
uint64_t ring_mem_mc_addr;
void *ring_mem_handle;
uint32_t ring_size;
+ uint32_t ring_wptr;
};
-struct psp_context
-{
- struct amdgpu_device *adev;
- struct psp_ring km_ring;
- struct psp_gfx_cmd_resp *cmd;
+/* More registers may be supported in the future */
+enum psp_reg_prog_id {
+ PSP_REG_IH_RB_CNTL = 0, /* register IH_RB_CNTL */
+ PSP_REG_IH_RB_CNTL_RING1 = 1, /* register IH_RB_CNTL_RING1 */
+ PSP_REG_IH_RB_CNTL_RING2 = 2, /* register IH_RB_CNTL_RING2 */
+ PSP_REG_MMHUB_L1_TLB_CNTL = 25,
+ PSP_REG_LAST
+};
+
+#define PSP_WAITREG_CHANGED BIT(0) /* check if the value has changed */
+#define PSP_WAITREG_NOVERBOSE	BIT(1) /* do not print error messages */
+struct psp_funcs {
int (*init_microcode)(struct psp_context *psp);
+ int (*wait_for_bootloader)(struct psp_context *psp);
+ int (*bootloader_load_kdb)(struct psp_context *psp);
+ int (*bootloader_load_spl)(struct psp_context *psp);
int (*bootloader_load_sysdrv)(struct psp_context *psp);
+ int (*bootloader_load_soc_drv)(struct psp_context *psp);
+ int (*bootloader_load_intf_drv)(struct psp_context *psp);
+ int (*bootloader_load_dbg_drv)(struct psp_context *psp);
+ int (*bootloader_load_ras_drv)(struct psp_context *psp);
+ int (*bootloader_load_ipkeymgr_drv)(struct psp_context *psp);
+ int (*bootloader_load_spdm_drv)(struct psp_context *psp);
int (*bootloader_load_sos)(struct psp_context *psp);
- int (*prep_cmd_buf)(struct amdgpu_firmware_info *ucode,
- struct psp_gfx_cmd_resp *cmd);
- int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
- int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type);
+ int (*ring_create)(struct psp_context *psp,
+ enum psp_ring_type ring_type);
+ int (*ring_stop)(struct psp_context *psp,
+ enum psp_ring_type ring_type);
int (*ring_destroy)(struct psp_context *psp,
enum psp_ring_type ring_type);
- int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode,
- uint64_t cmd_buf_mc_addr, uint64_t fence_mc_addr, int index);
- bool (*compare_sram_data)(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
+ int (*mode1_reset)(struct psp_context *psp);
+ int (*mem_training)(struct psp_context *psp, uint32_t ops);
+ uint32_t (*ring_get_wptr)(struct psp_context *psp);
+ void (*ring_set_wptr)(struct psp_context *psp, uint32_t value);
+ int (*load_usbc_pd_fw)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
+ int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
+ int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
+ int (*dump_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
+ int (*vbflash_stat)(struct psp_context *psp);
+ int (*fatal_error_recovery_quirk)(struct psp_context *psp);
+ bool (*get_ras_capability)(struct psp_context *psp);
+ bool (*is_aux_sos_load_required)(struct psp_context *psp);
+ bool (*is_reload_needed)(struct psp_context *psp);
+ int (*reg_program_no_ring)(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id);
+};
- /* fence buffer */
- struct amdgpu_bo *fw_pri_bo;
- uint64_t fw_pri_mc_addr;
+struct ta_funcs {
+ int (*fn_ta_initialize)(struct psp_context *psp);
+ int (*fn_ta_invoke)(struct psp_context *psp, uint32_t ta_cmd_id);
+ int (*fn_ta_terminate)(struct psp_context *psp);
+};
+
+#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
+struct psp_xgmi_node_info {
+ uint64_t node_id;
+ uint8_t num_hops;
+ uint8_t is_sharing_enabled;
+ enum ta_xgmi_assigned_sdma_engine sdma_engine;
+ uint8_t num_links;
+ struct xgmi_connected_port_num port_num[TA_XGMI__MAX_PORT_NUM];
+};
+
+struct psp_xgmi_topology_info {
+ uint32_t num_nodes;
+ struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
+};
+
+struct psp_bin_desc {
+ uint32_t fw_version;
+ uint32_t feature_version;
+ uint32_t size_bytes;
+ uint8_t *start_addr;
+};
+
+struct ta_mem_context {
+ struct amdgpu_bo *shared_bo;
+ uint64_t shared_mc_addr;
+ void *shared_buf;
+ enum psp_shared_mem_size shared_mem_size;
+};
+
+struct ta_context {
+ bool initialized;
+ uint32_t session_id;
+ uint32_t resp_status;
+ struct ta_mem_context mem_context;
+ struct psp_bin_desc bin_desc;
+ enum psp_gfx_cmd_id ta_load_type;
+ enum ta_type_id ta_type;
+};
+
+struct ta_cp_context {
+ struct ta_context context;
+ struct mutex mutex;
+};
+
+struct psp_xgmi_context {
+ struct ta_context context;
+ struct psp_xgmi_topology_info top_info;
+ bool supports_extended_data;
+ uint8_t xgmi_ta_caps;
+};
+
+struct psp_ras_context {
+ struct ta_context context;
+ struct amdgpu_ras *ras;
+ struct mutex mutex;
+};
+
+#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
+#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
+#define GDDR6_MEM_TRAINING_OFFSET 0x8000
+/* Define the VRAM size that will be encroached by BIST training. */
+#define BIST_MEM_TRAINING_ENCROACHED_SIZE 0x2000000
+
+enum psp_memory_training_init_flag {
+ PSP_MEM_TRAIN_NOT_SUPPORT = 0x0,
+ PSP_MEM_TRAIN_SUPPORT = 0x1,
+ PSP_MEM_TRAIN_INIT_FAILED = 0x2,
+ PSP_MEM_TRAIN_RESERVE_SUCCESS = 0x4,
+ PSP_MEM_TRAIN_INIT_SUCCESS = 0x8,
+};
+
+enum psp_memory_training_ops {
+ PSP_MEM_TRAIN_SEND_LONG_MSG = 0x1,
+ PSP_MEM_TRAIN_SAVE = 0x2,
+ PSP_MEM_TRAIN_RESTORE = 0x4,
+ PSP_MEM_TRAIN_SEND_SHORT_MSG = 0x8,
+ PSP_MEM_TRAIN_COLD_BOOT = PSP_MEM_TRAIN_SEND_LONG_MSG,
+ PSP_MEM_TRAIN_RESUME = PSP_MEM_TRAIN_SEND_SHORT_MSG,
+};
+
+struct psp_memory_training_context {
+	/* training data size */
+ u64 train_data_size;
+	/*
+	 * sys_cache:
+	 * CPU virtual address of the system memory buffer used to store
+	 * the training data.
+	 */
+ void *sys_cache;
+
+	/* vram offset of the p2c training data */
+ u64 p2c_train_data_offset;
+
+	/* vram offset of the c2p training data */
+ u64 c2p_train_data_offset;
+ struct amdgpu_bo *c2p_bo;
+
+ enum psp_memory_training_init_flag init;
+ u32 training_cnt;
+ bool enable_mem_training;
+};
+
+/* PSP runtime DB */
+#define PSP_RUNTIME_DB_SIZE_IN_BYTES 0x10000
+#define PSP_RUNTIME_DB_OFFSET 0x100000
+#define PSP_RUNTIME_DB_COOKIE_ID 0x0ed5
+#define PSP_RUNTIME_DB_VER_1 0x0100
+#define PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT 0x40
+
+enum psp_runtime_entry_type {
+ PSP_RUNTIME_ENTRY_TYPE_INVALID = 0x0,
+ PSP_RUNTIME_ENTRY_TYPE_TEST = 0x1,
+ PSP_RUNTIME_ENTRY_TYPE_MGPU_COMMON = 0x2, /* Common mGPU runtime data */
+ PSP_RUNTIME_ENTRY_TYPE_MGPU_WAFL = 0x3, /* WAFL runtime data */
+ PSP_RUNTIME_ENTRY_TYPE_MGPU_XGMI = 0x4, /* XGMI runtime data */
+ PSP_RUNTIME_ENTRY_TYPE_BOOT_CONFIG = 0x5, /* Boot Config runtime data */
+ PSP_RUNTIME_ENTRY_TYPE_PPTABLE_ERR_STATUS = 0x6, /* SCPM validation data */
+};
+
+/* PSP runtime DB header */
+struct psp_runtime_data_header {
+ /* determine the existence of runtime db */
+ uint16_t cookie;
+ /* version of runtime db */
+ uint16_t version;
+};
+
+/* PSP runtime DB entry */
+struct psp_runtime_entry {
+ /* type of runtime db entry */
+ uint32_t entry_type;
+ /* offset of entry in bytes */
+ uint16_t offset;
+ /* size of entry in bytes */
+ uint16_t size;
+};
+
+/* PSP runtime DB directory */
+struct psp_runtime_data_directory {
+ /* number of valid entries */
+ uint16_t entry_count;
+	/* db entries */
+ struct psp_runtime_entry entry_list[PSP_RUNTIME_DB_DIAG_ENTRY_MAX_COUNT];
+};
+
+/* PSP runtime DB boot config feature bitmask */
+enum psp_runtime_boot_cfg_feature {
+ BOOT_CFG_FEATURE_GECC = 0x1,
+ BOOT_CFG_FEATURE_TWO_STAGE_DRAM_TRAINING = 0x2,
+};
+
+/* PSP run time DB SCPM authentication defines */
+enum psp_runtime_scpm_authentication {
+ SCPM_DISABLE = 0x0,
+ SCPM_ENABLE = 0x1,
+ SCPM_ENABLE_WITH_SCPM_ERR = 0x2,
+};
+
+/* PSP runtime DB boot config entry */
+struct psp_runtime_boot_cfg_entry {
+ uint32_t boot_cfg_bitmask;
+ uint32_t reserved;
+};
+
+/* PSP runtime DB SCPM entry */
+struct psp_runtime_scpm_entry {
+ enum psp_runtime_scpm_authentication scpm_status;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+struct spirom_bo {
+ struct amdgpu_bo *bo;
+ uint64_t mc_addr;
+ void *cpu_addr;
+};
+#endif
+
+struct psp_context {
+ struct amdgpu_device *adev;
+ struct psp_ring km_ring;
+ struct psp_gfx_cmd_resp *cmd;
+
+ const struct psp_funcs *funcs;
+ const struct ta_funcs *ta_funcs;
+
+ /* firmware buffer */
+ struct amdgpu_bo *fw_pri_bo;
+ uint64_t fw_pri_mc_addr;
void *fw_pri_buf;
/* sos firmware */
const struct firmware *sos_fw;
- uint32_t sos_fw_version;
- uint32_t sos_feature_version;
- uint32_t sys_bin_size;
- uint32_t sos_bin_size;
- uint8_t *sys_start_addr;
- uint8_t *sos_start_addr;
+ struct psp_bin_desc sys;
+ struct psp_bin_desc sos;
+ struct psp_bin_desc toc;
+ struct psp_bin_desc kdb;
+ struct psp_bin_desc spl;
+ struct psp_bin_desc rl;
+ struct psp_bin_desc soc_drv;
+ struct psp_bin_desc intf_drv;
+ struct psp_bin_desc dbg_drv;
+ struct psp_bin_desc ras_drv;
+ struct psp_bin_desc ipkeymgr_drv;
+ struct psp_bin_desc spdm_drv;
/* tmr buffer */
- struct amdgpu_bo *tmr_bo;
- uint64_t tmr_mc_addr;
- void *tmr_buf;
+ struct amdgpu_bo *tmr_bo;
+ uint64_t tmr_mc_addr;
- /* asd firmware and buffer */
+ /* asd firmware */
const struct firmware *asd_fw;
- uint32_t asd_fw_version;
- uint32_t asd_feature_version;
- uint32_t asd_ucode_size;
- uint8_t *asd_start_addr;
- struct amdgpu_bo *asd_shared_bo;
- uint64_t asd_shared_mc_addr;
- void *asd_shared_buf;
+
+ /* toc firmware */
+ const struct firmware *toc_fw;
+
+ /* cap firmware */
+ const struct firmware *cap_fw;
/* fence buffer */
- struct amdgpu_bo *fence_buf_bo;
- uint64_t fence_buf_mc_addr;
+ struct amdgpu_bo *fence_buf_bo;
+ uint64_t fence_buf_mc_addr;
void *fence_buf;
+
+ /* cmd buffer */
+ struct amdgpu_bo *cmd_buf_bo;
+ uint64_t cmd_buf_mc_addr;
+ struct psp_gfx_cmd_resp *cmd_buf_mem;
+
+ /* fence value associated with cmd buffer */
+ atomic_t fence_value;
+ /* flag to mark whether gfx fw autoload is supported or not */
+ bool autoload_supported;
+ /* flag to mark whether psp use runtime TMR or boottime TMR */
+ bool boot_time_tmr;
+ /* flag to mark whether df cstate management centralized to PMFW */
+ bool pmfw_centralized_cstate_management;
+
+ /* xgmi ta firmware and buffer */
+ const struct firmware *ta_fw;
+ uint32_t ta_fw_version;
+
+ uint32_t cap_fw_version;
+ uint32_t cap_feature_version;
+ uint32_t cap_ucode_size;
+
+ struct ta_context asd_context;
+ struct psp_xgmi_context xgmi_context;
+ struct psp_ras_context ras_context;
+ struct ta_cp_context hdcp_context;
+ struct ta_cp_context dtm_context;
+ struct ta_cp_context rap_context;
+ struct ta_cp_context securedisplay_context;
+ struct mutex mutex;
+ struct psp_memory_training_context mem_train_ctx;
+
+ uint32_t boot_cfg_bitmask;
+
+ /* firmware upgrades supported */
+ bool sup_pd_fw_up;
+ bool sup_ifwi_up;
+
+ char *vbflash_tmp_buf;
+ size_t vbflash_image_size;
+ bool vbflash_done;
+#if defined(CONFIG_DEBUG_FS)
+ struct spirom_bo *spirom_dump_trip;
+#endif
};
struct amdgpu_psp_funcs {
@@ -115,27 +454,173 @@ struct amdgpu_psp_funcs {
enum AMDGPU_UCODE_ID);
};
-#define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type))
-#define psp_ring_init(psp, type) (psp)->ring_init((psp), (type))
-#define psp_ring_create(psp, type) (psp)->ring_create((psp), (type))
-#define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type)))
-#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
- (psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
-#define psp_compare_sram_data(psp, ucode, type) \
- (psp)->compare_sram_data((psp), (ucode), (type))
+
+#define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
+#define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
+#define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))
#define psp_init_microcode(psp) \
- ((psp)->init_microcode ? (psp)->init_microcode((psp)) : 0)
+ ((psp)->funcs->init_microcode ? (psp)->funcs->init_microcode((psp)) : 0)
+#define psp_bootloader_load_kdb(psp) \
+ ((psp)->funcs->bootloader_load_kdb ? (psp)->funcs->bootloader_load_kdb((psp)) : 0)
+#define psp_bootloader_load_spl(psp) \
+ ((psp)->funcs->bootloader_load_spl ? (psp)->funcs->bootloader_load_spl((psp)) : 0)
#define psp_bootloader_load_sysdrv(psp) \
- ((psp)->bootloader_load_sysdrv ? (psp)->bootloader_load_sysdrv((psp)) : 0)
+ ((psp)->funcs->bootloader_load_sysdrv ? (psp)->funcs->bootloader_load_sysdrv((psp)) : 0)
+#define psp_bootloader_load_soc_drv(psp) \
+ ((psp)->funcs->bootloader_load_soc_drv ? (psp)->funcs->bootloader_load_soc_drv((psp)) : 0)
+#define psp_bootloader_load_intf_drv(psp) \
+ ((psp)->funcs->bootloader_load_intf_drv ? (psp)->funcs->bootloader_load_intf_drv((psp)) : 0)
+#define psp_bootloader_load_dbg_drv(psp) \
+ ((psp)->funcs->bootloader_load_dbg_drv ? (psp)->funcs->bootloader_load_dbg_drv((psp)) : 0)
+#define psp_bootloader_load_ras_drv(psp) \
+ ((psp)->funcs->bootloader_load_ras_drv ? \
+ (psp)->funcs->bootloader_load_ras_drv((psp)) : 0)
+#define psp_bootloader_load_ipkeymgr_drv(psp) \
+ ((psp)->funcs->bootloader_load_ipkeymgr_drv ? \
+ (psp)->funcs->bootloader_load_ipkeymgr_drv((psp)) : 0)
+#define psp_bootloader_load_spdm_drv(psp) \
+ ((psp)->funcs->bootloader_load_spdm_drv ? \
+ (psp)->funcs->bootloader_load_spdm_drv((psp)) : 0)
#define psp_bootloader_load_sos(psp) \
- ((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0)
+ ((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
- ((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false)
+ ((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
+#define psp_mode1_reset(psp) \
+ ((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
+#define psp_mem_training(psp, ops) \
+ ((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0)
+
+#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
+#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
+
+#define psp_load_usbc_pd_fw(psp, fw_pri_mc_addr) \
+ ((psp)->funcs->load_usbc_pd_fw ? \
+ (psp)->funcs->load_usbc_pd_fw((psp), (fw_pri_mc_addr)) : -EINVAL)
+
+#define psp_read_usbc_pd_fw(psp, fw_ver) \
+ ((psp)->funcs->read_usbc_pd_fw ? \
+ (psp)->funcs->read_usbc_pd_fw((psp), fw_ver) : -EINVAL)
+
+#define psp_update_spirom(psp, fw_pri_mc_addr) \
+ ((psp)->funcs->update_spirom ? \
+ (psp)->funcs->update_spirom((psp), fw_pri_mc_addr) : -EINVAL)
+
+#define psp_dump_spirom(psp, fw_pri_mc_addr) \
+ ((psp)->funcs->dump_spirom ? \
+ (psp)->funcs->dump_spirom((psp), fw_pri_mc_addr) : -EINVAL)
+
+#define psp_vbflash_status(psp) \
+ ((psp)->funcs->vbflash_stat ? \
+ (psp)->funcs->vbflash_stat((psp)) : -EINVAL)
+
+#define psp_fatal_error_recovery_quirk(psp) \
+ ((psp)->funcs->fatal_error_recovery_quirk ? \
+ (psp)->funcs->fatal_error_recovery_quirk((psp)) : 0)
+
+#define psp_is_aux_sos_load_required(psp) \
+ ((psp)->funcs->is_aux_sos_load_required ? (psp)->funcs->is_aux_sos_load_required((psp)) : 0)
+
+#define psp_reg_program_no_ring(psp, val, id) \
+ ((psp)->funcs->reg_program_no_ring ? \
+ (psp)->funcs->reg_program_no_ring((psp), val, id) : -EINVAL)
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
-extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t field_val, uint32_t mask, bool check_changed);
+extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
+extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
+extern const struct amdgpu_ip_block_version psp_v11_0_8_ip_block;
+extern const struct amdgpu_ip_block_version psp_v12_0_ip_block;
+extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;
+extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block;
+extern const struct amdgpu_ip_block_version psp_v14_0_ip_block;
+
+int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
+ uint32_t field_val, uint32_t mask, uint32_t flags);
+extern int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index,
+ uint32_t field_val, uint32_t mask, uint32_t msec_timeout);
+
+int psp_execute_ip_fw_load(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode);
+
+int psp_gpu_reset(struct amdgpu_device *adev);
+
+int psp_ta_init_shared_buf(struct psp_context *psp,
+ struct ta_mem_context *mem_ctx);
+void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx);
+int psp_ta_unload(struct psp_context *psp, struct ta_context *context);
+int psp_ta_load(struct psp_context *psp, struct ta_context *context);
+int psp_ta_invoke(struct psp_context *psp,
+ uint32_t ta_cmd_id,
+ struct ta_context *context);
+
+int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta);
+int psp_xgmi_terminate(struct psp_context *psp);
+int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id);
+int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id);
+int psp_xgmi_get_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology,
+ bool get_extended_data);
+int psp_xgmi_set_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology);
+int psp_ras_initialize(struct psp_context *psp);
+int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_ras_enable_features(struct psp_context *psp,
+ union ta_ras_cmd_input *info, bool enable);
+int psp_ras_trigger_error(struct psp_context *psp,
+ struct ta_ras_trigger_error_input *info, uint32_t instance_mask);
+int psp_ras_terminate(struct psp_context *psp);
+int psp_ras_query_address(struct psp_context *psp,
+ struct ta_ras_query_address_input *addr_in,
+ struct ta_ras_query_address_output *addr_out);
+
+int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status);
+int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+
+int psp_rlc_autoload_start(struct psp_context *psp);
+
+int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
+ uint32_t value);
+int psp_ring_cmd_submit(struct psp_context *psp,
+ uint64_t cmd_buf_mc_addr,
+ uint64_t fence_mc_addr,
+ int index);
+int psp_init_asd_microcode(struct psp_context *psp,
+ const char *chip_name);
+int psp_init_toc_microcode(struct psp_context *psp,
+ const char *chip_name);
+int psp_init_sos_microcode(struct psp_context *psp,
+ const char *chip_name);
+int psp_init_ta_microcode(struct psp_context *psp,
+ const char *chip_name);
+int psp_init_cap_microcode(struct psp_context *psp,
+ const char *chip_name);
+int psp_get_fw_attestation_records_addr(struct psp_context *psp,
+ uint64_t *output_ptr);
+int psp_update_fw_reservation(struct psp_context *psp);
+int psp_load_fw_list(struct psp_context *psp,
+ struct amdgpu_firmware_info **ucode_list, int ucode_count);
+void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size);
+
+int psp_spatial_partition(struct psp_context *psp, int mode);
+int psp_memory_partition(struct psp_context *psp, int mode);
+
+int is_psp_fw_valid(struct psp_bin_desc bin);
+
+int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev);
+bool amdgpu_psp_get_ras_capability(struct psp_context *psp);
+
+int psp_config_sq_perfmon(struct psp_context *psp, uint32_t xcp_id,
+ bool core_override_enable, bool reg_override_enable, bool perfmon_override_enable);
+bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev);
+int amdgpu_psp_reg_program_no_ring(struct psp_context *psp, uint32_t val,
+ enum psp_reg_prog_id id);
+void amdgpu_psp_debugfs_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
new file mode 100644
index 000000000000..6e8aad91bcd3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
@@ -0,0 +1,392 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_psp_ta.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf,
+ size_t len, loff_t *off);
+static ssize_t ta_if_unload_debugfs_write(struct file *fp, const char *buf,
+ size_t len, loff_t *off);
+static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf,
+ size_t len, loff_t *off);
+
+static uint32_t get_bin_version(const uint8_t *bin)
+{
+ const struct common_firmware_header *hdr =
+ (const struct common_firmware_header *)bin;
+
+ return hdr->ucode_version;
+}
+
+static int prep_ta_mem_context(struct ta_mem_context *mem_context,
+ uint8_t *shared_buf,
+ uint32_t shared_buf_len)
+{
+ if (mem_context->shared_mem_size < shared_buf_len)
+ return -EINVAL;
+ memset(mem_context->shared_buf, 0, mem_context->shared_mem_size);
+ memcpy((void *)mem_context->shared_buf, shared_buf, shared_buf_len);
+
+ return 0;
+}
+
+static bool is_ta_type_valid(enum ta_type_id ta_type)
+{
+ switch (ta_type) {
+ case TA_TYPE_RAS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct ta_funcs ras_ta_funcs = {
+ .fn_ta_initialize = psp_ras_initialize,
+ .fn_ta_invoke = psp_ras_invoke,
+ .fn_ta_terminate = psp_ras_terminate
+};
+
+static void set_ta_context_funcs(struct psp_context *psp,
+ enum ta_type_id ta_type,
+ struct ta_context **pcontext)
+{
+ switch (ta_type) {
+ case TA_TYPE_RAS:
+ *pcontext = &psp->ras_context.context;
+ psp->ta_funcs = &ras_ta_funcs;
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct file_operations ta_load_debugfs_fops = {
+ .write = ta_if_load_debugfs_write,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE
+};
+
+static const struct file_operations ta_unload_debugfs_fops = {
+ .write = ta_if_unload_debugfs_write,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE
+};
+
+static const struct file_operations ta_invoke_debugfs_fops = {
+ .write = ta_if_invoke_debugfs_write,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE
+};
+
+/*
+ * DOC: AMDGPU TA debugfs interfaces
+ *
+ * Three debugfs interfaces can be opened by a program to
+ * load/invoke/unload TA,
+ *
+ * - /sys/kernel/debug/dri/<N>/ta_if/ta_load
+ * - /sys/kernel/debug/dri/<N>/ta_if/ta_invoke
+ * - /sys/kernel/debug/dri/<N>/ta_if/ta_unload
+ *
+ * How to use the interfaces in a program:
+ *
+ * A program provides a transmit buffer to each interface and, where noted
+ * below, receives output back in the same buffer:
+ *
+ * - For TA load debugfs interface:
+ * Transmit buffer:
+ * - TA type (4bytes)
+ * - TA bin length (4bytes)
+ * - TA bin
+ * Receive buffer:
+ * - TA ID (4bytes)
+ *
+ * - For TA invoke debugfs interface:
+ * Transmit buffer:
+ * - TA type (4bytes)
+ * - TA ID (4bytes)
+ * - TA CMD ID (4bytes)
+ *     - TA shared buf length
+ * (4bytes, value not beyond TA shared memory size)
+ * - TA shared buf
+ * Receive buffer:
+ * - TA shared buf
+ *
+ * - For TA unload debugfs interface:
+ * Transmit buffer:
+ * - TA type (4bytes)
+ * - TA ID (4bytes)
+ */
+
+static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t len, loff_t *off)
+{
+ uint32_t ta_type = 0;
+ uint32_t ta_bin_len = 0;
+ uint8_t *ta_bin = NULL;
+ uint32_t copy_pos = 0;
+ int ret = 0;
+
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(fp)->i_private;
+ struct psp_context *psp = &adev->psp;
+ struct ta_context *context = NULL;
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = copy_from_user((void *)&ta_type, &buf[copy_pos], sizeof(uint32_t));
+ if (ret || (!is_ta_type_valid(ta_type)))
+ return -EFAULT;
+
+ copy_pos += sizeof(uint32_t);
+
+ ret = copy_from_user((void *)&ta_bin_len, &buf[copy_pos], sizeof(uint32_t));
+ if (ret)
+ return -EFAULT;
+
+ if (ta_bin_len > PSP_1_MEG)
+ return -EINVAL;
+
+ copy_pos += sizeof(uint32_t);
+
+ ta_bin = memdup_user(&buf[copy_pos], ta_bin_len);
+ if (IS_ERR(ta_bin))
+ return PTR_ERR(ta_bin);
+
+ /* Set TA context and functions */
+ set_ta_context_funcs(psp, ta_type, &context);
+
+ if (!psp->ta_funcs || !psp->ta_funcs->fn_ta_terminate) {
+ dev_err(adev->dev, "Unsupported function to terminate TA\n");
+ ret = -EOPNOTSUPP;
+ goto err_free_bin;
+ }
+
+	/*
+	 * Allocate the TA shared buf in case it was freed due to a
+	 * previous TA load failure.
+	 */
+ if (!context->mem_context.shared_buf) {
+ ret = psp_ta_init_shared_buf(psp, &context->mem_context);
+ if (ret) {
+ ret = -ENOMEM;
+ goto err_free_bin;
+ }
+ }
+
+ ret = psp_fn_ta_terminate(psp);
+ if (ret || context->resp_status) {
+ dev_err(adev->dev,
+ "Failed to unload embedded TA (%d) and status (0x%X)\n",
+ ret, context->resp_status);
+ if (!ret)
+ ret = -EINVAL;
+ goto err_free_ta_shared_buf;
+ }
+
+ /* Prepare TA context for TA initialization */
+ context->ta_type = ta_type;
+ context->bin_desc.fw_version = get_bin_version(ta_bin);
+ context->bin_desc.size_bytes = ta_bin_len;
+ context->bin_desc.start_addr = ta_bin;
+
+ if (!psp->ta_funcs->fn_ta_initialize) {
+ dev_err(adev->dev, "Unsupported function to initialize TA\n");
+ ret = -EOPNOTSUPP;
+ goto err_free_ta_shared_buf;
+ }
+
+ ret = psp_fn_ta_initialize(psp);
+ if (ret || context->resp_status) {
+ dev_err(adev->dev, "Failed to load TA via debugfs (%d) and status (0x%X)\n",
+ ret, context->resp_status);
+ if (!ret)
+ ret = -EINVAL;
+ goto err_free_ta_shared_buf;
+ }
+
+ if (copy_to_user((char *)buf, (void *)&context->session_id, sizeof(uint32_t)))
+ ret = -EFAULT;
+
+err_free_ta_shared_buf:
+	/* Only free the TA shared buf when returning an error code */
+ if (ret && context->mem_context.shared_buf)
+ psp_ta_free_shared_buf(&context->mem_context);
+err_free_bin:
+ kfree(ta_bin);
+
+ return ret;
+}
+
+static ssize_t ta_if_unload_debugfs_write(struct file *fp, const char *buf, size_t len, loff_t *off)
+{
+ uint32_t ta_type = 0;
+ uint32_t ta_id = 0;
+ uint32_t copy_pos = 0;
+ int ret = 0;
+
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(fp)->i_private;
+ struct psp_context *psp = &adev->psp;
+ struct ta_context *context = NULL;
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = copy_from_user((void *)&ta_type, &buf[copy_pos], sizeof(uint32_t));
+ if (ret || (!is_ta_type_valid(ta_type)))
+ return -EFAULT;
+
+ copy_pos += sizeof(uint32_t);
+
+ ret = copy_from_user((void *)&ta_id, &buf[copy_pos], sizeof(uint32_t));
+ if (ret)
+ return -EFAULT;
+
+ set_ta_context_funcs(psp, ta_type, &context);
+ context->session_id = ta_id;
+
+ if (!psp->ta_funcs || !psp->ta_funcs->fn_ta_terminate) {
+ dev_err(adev->dev, "Unsupported function to terminate TA\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = psp_fn_ta_terminate(psp);
+ if (ret || context->resp_status) {
+ dev_err(adev->dev, "Failed to unload TA via debugfs (%d) and status (0x%X)\n",
+ ret, context->resp_status);
+ if (!ret)
+ ret = -EINVAL;
+ }
+
+ if (context->mem_context.shared_buf)
+ psp_ta_free_shared_buf(&context->mem_context);
+
+ return ret;
+}
+
+static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size_t len, loff_t *off)
+{
+ uint32_t ta_type = 0;
+ uint32_t ta_id = 0;
+ uint32_t cmd_id = 0;
+ uint32_t shared_buf_len = 0;
+ uint8_t *shared_buf = NULL;
+ uint32_t copy_pos = 0;
+ int ret = 0;
+
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(fp)->i_private;
+ struct psp_context *psp = &adev->psp;
+ struct ta_context *context = NULL;
+
+ if (!buf)
+ return -EINVAL;
+
+ ret = copy_from_user((void *)&ta_type, &buf[copy_pos], sizeof(uint32_t));
+ if (ret)
+ return -EFAULT;
+ copy_pos += sizeof(uint32_t);
+
+ ret = copy_from_user((void *)&ta_id, &buf[copy_pos], sizeof(uint32_t));
+ if (ret)
+ return -EFAULT;
+ copy_pos += sizeof(uint32_t);
+
+ ret = copy_from_user((void *)&cmd_id, &buf[copy_pos], sizeof(uint32_t));
+ if (ret)
+ return -EFAULT;
+ copy_pos += sizeof(uint32_t);
+
+ ret = copy_from_user((void *)&shared_buf_len, &buf[copy_pos], sizeof(uint32_t));
+ if (ret)
+ return -EFAULT;
+ copy_pos += sizeof(uint32_t);
+
+ shared_buf = memdup_user(&buf[copy_pos], shared_buf_len);
+ if (IS_ERR(shared_buf))
+ return PTR_ERR(shared_buf);
+
+ set_ta_context_funcs(psp, ta_type, &context);
+
+ if (!context || !context->initialized) {
+ dev_err(adev->dev, "TA is not initialized\n");
+ ret = -EINVAL;
+ goto err_free_shared_buf;
+ }
+
+ if (!psp->ta_funcs || !psp->ta_funcs->fn_ta_invoke) {
+ dev_err(adev->dev, "Unsupported function to invoke TA\n");
+ ret = -EOPNOTSUPP;
+ goto err_free_shared_buf;
+ }
+
+ context->session_id = ta_id;
+
+ mutex_lock(&psp->ras_context.mutex);
+ ret = prep_ta_mem_context(&context->mem_context, shared_buf, shared_buf_len);
+ if (ret)
+ goto err_free_shared_buf;
+
+ ret = psp_fn_ta_invoke(psp, cmd_id);
+ if (ret || context->resp_status) {
+ dev_err(adev->dev, "Failed to invoke TA via debugfs (%d) and status (0x%X)\n",
+ ret, context->resp_status);
+ if (!ret) {
+ ret = -EINVAL;
+ goto err_free_shared_buf;
+ }
+ }
+
+ if (copy_to_user((char *)&buf[copy_pos], context->mem_context.shared_buf, shared_buf_len))
+ ret = -EFAULT;
+
+err_free_shared_buf:
+ mutex_unlock(&psp->ras_context.mutex);
+ kfree(shared_buf);
+
+ return ret;
+}
+
+void amdgpu_ta_if_debugfs_init(struct amdgpu_device *adev)
+{
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+
+ struct dentry *dir = debugfs_create_dir("ta_if", minor->debugfs_root);
+
+ debugfs_create_file("ta_load", 0200, dir, adev,
+ &ta_load_debugfs_fops);
+
+ debugfs_create_file("ta_unload", 0200, dir,
+ adev, &ta_unload_debugfs_fops);
+
+ debugfs_create_file("ta_invoke", 0200, dir,
+ adev, &ta_invoke_debugfs_fops);
+}
+
+#else
+void amdgpu_ta_if_debugfs_init(struct amdgpu_device *adev)
+{
+
+}
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.h
new file mode 100644
index 000000000000..14cd1c81c3e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_PSP_TA_H__
+#define __AMDGPU_PSP_TA_H__
+
+/* Calling set_ta_context_funcs is required before using the following macros */
+#define psp_fn_ta_initialize(psp) ((psp)->ta_funcs->fn_ta_initialize((psp)))
+#define psp_fn_ta_invoke(psp, ta_cmd_id) ((psp)->ta_funcs->fn_ta_invoke((psp), (ta_cmd_id)))
+#define psp_fn_ta_terminate(psp) ((psp)->ta_funcs->fn_ta_terminate((psp)))
+
+void amdgpu_ta_if_debugfs_init(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
new file mode 100644
index 000000000000..123bcf5c2bb1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_rap.h"
+
+/**
+ * DOC: AMDGPU RAP debugfs test interface
+ *
+ * Usage:
+ * echo opcode > <debugfs_dir>/dri/xxx/rap_test
+ *
+ * opcode:
+ * Currently only opcode 2 is supported by the Linux host driver.
+ * Opcode 2 stands for TA_CMD_RAP__VALIDATE_L0 and is used to trigger L0
+ * policy validation; see the header file ta_rap_if.h for more detail.
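+ *
+ * A minimal example (assuming the device is DRM minor 0; the path is
+ * illustrative):
+ *
+ * echo 2 > /sys/kernel/debug/dri/0/rap_test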
+ *
+ */
+static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct ta_rap_shared_memory *rap_shared_mem;
+ struct ta_rap_cmd_output_data *rap_cmd_output;
+ struct drm_device *dev = adev_to_drm(adev);
+ uint32_t op;
+ enum ta_rap_status status;
+ int ret;
+
+ if (*pos || size != 2)
+ return -EINVAL;
+
+ ret = kstrtouint_from_user(buf, size, *pos, &op);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+ }
+
+	/* make sure the gfx core is on; the RAP TA can't handle the
+	 * GFX OFF case currently.
+	 */
+ amdgpu_gfx_off_ctrl(adev, false);
+
+ switch (op) {
+ case 2:
+ ret = psp_rap_invoke(&adev->psp, op, &status);
+ if (!ret && status == TA_RAP_STATUS__SUCCESS) {
+ dev_info(adev->dev, "RAP L0 validate test success.\n");
+ } else {
+ rap_shared_mem = (struct ta_rap_shared_memory *)
+ adev->psp.rap_context.context.mem_context.shared_buf;
+ rap_cmd_output = &(rap_shared_mem->rap_out_message.output);
+
+ dev_info(adev->dev, "RAP test failed, the output is:\n");
+ dev_info(adev->dev, "\tlast_subsection: 0x%08x.\n",
+ rap_cmd_output->last_subsection);
+ dev_info(adev->dev, "\tnum_total_validate: 0x%08x.\n",
+ rap_cmd_output->num_total_validate);
+ dev_info(adev->dev, "\tnum_valid: 0x%08x.\n",
+ rap_cmd_output->num_valid);
+ dev_info(adev->dev, "\tlast_validate_addr: 0x%08x.\n",
+ rap_cmd_output->last_validate_addr);
+ dev_info(adev->dev, "\tlast_validate_val: 0x%08x.\n",
+ rap_cmd_output->last_validate_val);
+ dev_info(adev->dev, "\tlast_validate_val_exptd: 0x%08x.\n",
+ rap_cmd_output->last_validate_val_exptd);
+ }
+ break;
+ default:
+		dev_info(adev->dev, "Unsupported op id: %d, ", op);
+		dev_info(adev->dev, "Only op 2 (L0 validate test) is supported.\n");
+ break;
+ }
+
+ amdgpu_gfx_off_ctrl(adev, true);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return size;
+}
+
+static const struct file_operations amdgpu_rap_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_rap_debugfs_write,
+ .llseek = default_llseek
+};
+
+void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
+{
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+
+ if (!adev->psp.rap_context.context.initialized)
+ return;
+
+ debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root,
+ adev, &amdgpu_rap_debugfs_ops);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h
new file mode 100644
index 000000000000..ec6d7632d3a0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _AMDGPU_RAP_H
+#define _AMDGPU_RAP_H
+
+#include "amdgpu.h"
+
+void amdgpu_rap_debugfs_init(struct amdgpu_device *adev);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
new file mode 100644
index 000000000000..e0ee21150860
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -0,0 +1,5492 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/reboot.h>
+#include <linux/syscalls.h>
+#include <linux/pm_runtime.h>
+#include <linux/list_sort.h>
+
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_xgmi.h"
+#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
+#include "nbio_v4_3.h"
+#include "nbif_v6_3_1.h"
+#include "nbio_v7_9.h"
+#include "atom.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_psp.h"
+
+#ifdef CONFIG_X86_MCE_AMD
+#include <asm/mce.h>
+
+static bool notifier_registered;
+#endif
+static const char *RAS_FS_NAME = "ras";
+
+const char *ras_error_string[] = {
+ "none",
+ "parity",
+ "single_correctable",
+ "multi_uncorrectable",
+ "poison",
+};
+
+const char *ras_block_string[] = {
+ "umc",
+ "sdma",
+ "gfx",
+ "mmhub",
+ "athub",
+ "pcie_bif",
+ "hdp",
+ "xgmi_wafl",
+ "df",
+ "smn",
+ "sem",
+ "mp0",
+ "mp1",
+ "fuse",
+ "mca",
+ "vcn",
+ "jpeg",
+ "ih",
+ "mpio",
+ "mmsch",
+};
+
+const char *ras_mca_block_string[] = {
+ "mca_mp0",
+ "mca_mp1",
+ "mca_mpio",
+ "mca_iohc",
+};
+
+struct amdgpu_ras_block_list {
+ /* ras block link */
+ struct list_head node;
+
+ struct amdgpu_ras_block_object *ras_obj;
+};
+
+const char *get_ras_block_str(struct ras_common_if *ras_block)
+{
+ if (!ras_block)
+ return "NULL";
+
+ if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
+ ras_block->block >= ARRAY_SIZE(ras_block_string))
+ return "OUT OF RANGE";
+
+ if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
+ return ras_mca_block_string[ras_block->sub_block_index];
+
+ return ras_block_string[ras_block->block];
+}
+
+#define ras_block_str(_BLOCK_) \
+ (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range")
+
+#define ras_err_str(i) (ras_error_string[ffs(i)])
+
+#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
+
+/* inject address is 52 bits */
+#define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
+
+/* typical ECC bad page rate is 1 bad page per 100MB VRAM */
+#define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL)
+
+#define MAX_UMC_POISON_POLLING_TIME_ASYNC 10
+
+#define AMDGPU_RAS_RETIRE_PAGE_INTERVAL 100 //ms
+
+#define MAX_FLUSH_RETIRE_DWORK_TIMES 100
+
+#define BYPASS_ALLOCATED_ADDRESS 0x0
+#define BYPASS_INITIALIZATION_ADDRESS 0x1
+
+enum amdgpu_ras_retire_page_reservation {
+ AMDGPU_RAS_RETIRE_PAGE_RESERVED,
+ AMDGPU_RAS_RETIRE_PAGE_PENDING,
+ AMDGPU_RAS_RETIRE_PAGE_FAULT,
+};
+
+atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
+
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+ uint64_t addr);
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+ uint64_t addr);
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev);
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev);
+
+#ifdef CONFIG_X86_MCE_AMD
+static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
+struct mce_notifier_adev_list {
+ struct amdgpu_device *devs[MAX_GPU_INSTANCE];
+ int num_gpu;
+};
+static struct mce_notifier_adev_list mce_adev_list;
+#endif
+
+void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
+{
+ if (adev && amdgpu_ras_get_context(adev))
+ amdgpu_ras_get_context(adev)->error_query_ready = ready;
+}
+
+static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
+{
+ if (adev && amdgpu_ras_get_context(adev))
+ return amdgpu_ras_get_context(adev)->error_query_ready;
+
+ return false;
+}
+
+static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
+{
+ struct ras_err_data err_data;
+ struct eeprom_table_record err_rec;
+ int ret;
+
+ ret = amdgpu_ras_check_bad_page(adev, address);
+ if (ret == -EINVAL) {
+ dev_warn(adev->dev,
+ "RAS WARN: input address 0x%llx is invalid.\n",
+ address);
+ return -EINVAL;
+ } else if (ret == 1) {
+ dev_warn(adev->dev,
+ "RAS WARN: 0x%llx has already been marked as bad page!\n",
+ address);
+ return 0;
+ }
+
+ ret = amdgpu_ras_error_data_init(&err_data);
+ if (ret)
+ return ret;
+
+ memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
+ err_data.err_addr = &err_rec;
+ amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);
+
+ if (amdgpu_bad_page_threshold != 0) {
+ amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
+ err_data.err_addr_cnt, false);
+ amdgpu_ras_save_bad_pages(adev, NULL);
+ }
+
+ amdgpu_ras_error_data_fini(&err_data);
+
+ dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
+ dev_warn(adev->dev, "Clear EEPROM:\n");
+ dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
+
+ return 0;
+}
+
+static int amdgpu_check_address_validity(struct amdgpu_device *adev,
+ uint64_t address, uint64_t flags)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_block_info blk_info;
+ uint64_t page_pfns[32] = {0};
+ int i, ret, count;
+ bool hit = false;
+
+ if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0))
+ return 0;
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_virt_check_vf_critical_region(adev, address, &hit))
+ return -EPERM;
+ return hit ? -EACCES : 0;
+ }
+
+ if ((address >= adev->gmc.mc_vram_size) ||
+ (address >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EFAULT;
+
+ count = amdgpu_umc_lookup_bad_pages_in_a_row(adev,
+ address, page_pfns, ARRAY_SIZE(page_pfns));
+ if (count <= 0)
+ return -EPERM;
+
+ for (i = 0; i < count; i++) {
+ memset(&blk_info, 0, sizeof(blk_info));
+ ret = amdgpu_vram_mgr_query_address_block_info(&adev->mman.vram_mgr,
+ page_pfns[i] << AMDGPU_GPU_PAGE_SHIFT, &blk_info);
+ if (!ret) {
+ /* The input address that needs to be checked is allocated by
+ * current calling process, so it is necessary to exclude
+ * the calling process.
+ */
+ if ((flags == BYPASS_ALLOCATED_ADDRESS) &&
+ ((blk_info.task.pid != task_pid_nr(current)) ||
+ strncmp(blk_info.task.comm, current->comm, TASK_COMM_LEN)))
+ return -EACCES;
+ else if ((flags == BYPASS_INITIALIZATION_ADDRESS) &&
+ (blk_info.task.pid == con->init_task_pid) &&
+ !strncmp(blk_info.task.comm, con->init_task_comm, TASK_COMM_LEN))
+ return -EACCES;
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+ ssize_t s;
+ char val[128];
+
+ if (amdgpu_ras_query_error_status(obj->adev, &info))
+ return -EINVAL;
+
+ /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
+ if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
+ amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
+ if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
+ dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
+ }
+
+ s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
+ "ue", info.ue_count,
+ "ce", info.ce_count);
+ if (*pos >= s)
+ return 0;
+
+ s -= *pos;
+ s = min_t(u64, s, size);
+
+ if (copy_to_user(buf, &val[*pos], s))
+ return -EINVAL;
+
+ *pos += s;
+
+ return s;
+}
+
+static const struct file_operations amdgpu_ras_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_ras_debugfs_read,
+ .write = NULL,
+ .llseek = default_llseek
+};
+
+static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
+ *block_id = i;
+ if (strcmp(name, ras_block_string[i]) == 0)
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
+ const char __user *buf, size_t size,
+ loff_t *pos, struct ras_debug_if *data)
+{
+ ssize_t s = min_t(u64, 64, size);
+ char str[65];
+ char block_name[33];
+ char err[9] = "ue";
+ int op = -1;
+ int block_id;
+ uint32_t sub_block;
+ u64 address, value;
+ /* default value is 0 if the mask is not set by user */
+ u32 instance_mask = 0;
+
+ if (*pos)
+ return -EINVAL;
+ *pos = size;
+
+ memset(str, 0, sizeof(str));
+ memset(data, 0, sizeof(*data));
+
+ if (copy_from_user(str, buf, s))
+ return -EINVAL;
+
+ if (sscanf(str, "disable %32s", block_name) == 1)
+ op = 0;
+ else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
+ op = 1;
+ else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
+ op = 2;
+ else if (strstr(str, "retire_page") != NULL)
+ op = 3;
+ else if (strstr(str, "check_address") != NULL)
+ op = 4;
+ else if (str[0] && str[1] && str[2] && str[3])
+ /* ascii string, but commands are not matched. */
+ return -EINVAL;
+
+ if (op != -1) {
+ if (op == 3) {
+ if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
+ sscanf(str, "%*s %llu", &address) != 1)
+ return -EINVAL;
+
+ data->op = op;
+ data->inject.address = address;
+
+ return 0;
+ } else if (op == 4) {
+ if (sscanf(str, "%*s 0x%llx 0x%llx", &address, &value) != 2 &&
+ sscanf(str, "%*s %llu %llu", &address, &value) != 2)
+ return -EINVAL;
+
+ data->op = op;
+ data->inject.address = address;
+ data->inject.value = value;
+ return 0;
+ }
+
+ if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
+ return -EINVAL;
+
+ data->head.block = block_id;
+ /* only ue, ce and poison errors are supported */
+ if (!memcmp("ue", err, 2))
+ data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ else if (!memcmp("ce", err, 2))
+ data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
+ else if (!memcmp("poison", err, 6))
+ data->head.type = AMDGPU_RAS_ERROR__POISON;
+ else
+ return -EINVAL;
+
+ data->op = op;
+
+ if (op == 2) {
+ if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
+ &sub_block, &address, &value, &instance_mask) != 4 &&
+ sscanf(str, "%*s %*s %*s %u %llu %llu %u",
+ &sub_block, &address, &value, &instance_mask) != 4 &&
+ sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
+ &sub_block, &address, &value) != 3 &&
+ sscanf(str, "%*s %*s %*s %u %llu %llu",
+ &sub_block, &address, &value) != 3)
+ return -EINVAL;
+ data->head.sub_block_index = sub_block;
+ data->inject.address = address;
+ data->inject.value = value;
+ data->inject.instance_mask = instance_mask;
+ }
+ } else {
+ if (size < sizeof(*data))
+ return -EINVAL;
+
+ if (copy_from_user(data, buf, sizeof(*data)))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
+ struct ras_debug_if *data)
+{
+ int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
+ uint32_t mask, inst_mask = data->inject.instance_mask;
+
+ /* no need to set instance mask if there is only one instance */
+ if (num_xcc <= 1 && inst_mask) {
+ data->inject.instance_mask = 0;
+ dev_dbg(adev->dev,
+ "RAS inject mask(0x%x) isn't supported and force it to 0.\n",
+ inst_mask);
+
+ return;
+ }
+
+ switch (data->head.block) {
+ case AMDGPU_RAS_BLOCK__GFX:
+ mask = GENMASK(num_xcc - 1, 0);
+ break;
+ case AMDGPU_RAS_BLOCK__SDMA:
+ mask = GENMASK(adev->sdma.num_instances - 1, 0);
+ break;
+ case AMDGPU_RAS_BLOCK__VCN:
+ case AMDGPU_RAS_BLOCK__JPEG:
+ mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
+ break;
+ default:
+ mask = inst_mask;
+ break;
+ }
+
+ /* remove invalid bits in instance mask */
+ data->inject.instance_mask &= mask;
+ if (inst_mask != data->inject.instance_mask)
+ dev_dbg(adev->dev,
+ "Adjust RAS inject mask 0x%x to 0x%x\n",
+ inst_mask, data->inject.instance_mask);
+}
+
+/**
+ * DOC: AMDGPU RAS debugfs control interface
+ *
+ * The control interface accepts struct ras_debug_if which has two members.
+ *
+ * First member: ras_debug_if::head or ras_debug_if::inject.
+ *
+ * head is used to indicate which IP block will be under control.
+ *
+ * head has four members: block, type, sub_block_index, and name.
+ * block: which IP will be under control.
+ * type: what kind of error will be enabled/disabled/injected.
+ * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
+ * name: the name of the IP.
+ *
+ * inject has three more members than head: address, value and mask.
+ * As their names indicate, the inject operation will write the
+ * value to the address.
+ *
+ * The second member: struct ras_debug_if::op.
+ * It has three kinds of operations.
+ *
+ * - 0: disable RAS on the block. Take ::head as its data.
+ * - 1: enable RAS on the block. Take ::head as its data.
+ * - 2: inject errors on the block. Take ::inject as its data.
+ *
+ * How to use the interface?
+ *
+ * In a program
+ *
+ * Copy the struct ras_debug_if in your code and initialize it.
+ * Write the struct to the control interface.
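+ *
+ * For illustration only, a minimal userspace sketch is shown below. It does
+ * not write the binary struct (that would require copying the definition of
+ * struct ras_debug_if from amdgpu_ras.h); instead it writes one of the text
+ * commands documented below, which the control interface also accepts.
+ * Card index 0 and the umc block are assumptions made for this example.
+ *
+ * .. code-block:: c
+ *
+ *   #include <fcntl.h>
+ *   #include <stdio.h>
+ *   #include <string.h>
+ *   #include <unistd.h>
+ *
+ *   int main(void)
+ *   {
+ *           // Same effect as: echo inject umc ue 0x0 0x0 0x0 > ras_ctrl
+ *           const char cmd[] = "inject umc ue 0x0 0x0 0x0";
+ *           int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
+ *
+ *           if (fd < 0) {
+ *                   perror("open");
+ *                   return 1;
+ *           }
+ *           if (write(fd, cmd, strlen(cmd)) < 0) {
+ *                   perror("write");
+ *                   close(fd);
+ *                   return 1;
+ *           }
+ *           close(fd);
+ *           return 0;
+ *   }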
+ *
+ * From shell
+ *
+ * .. code-block:: bash
+ *
+ * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
+ * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
+ * echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl
+ *
+ * Where N is the card which you want to affect.
+ *
+ * "disable" requires only the block.
+ * "enable" requires the block and error type.
+ * "inject" requires the block, error type, address, and value.
+ *
+ * The block is one of: umc, sdma, gfx, etc.;
+ * see ras_block_string[] for details.
+ *
+ * The error type is one of: ue, ce and poison where,
+ * ue is multi-uncorrectable
+ * ce is single-correctable
+ * poison is poison
+ *
+ * The sub-block is the sub-block index; pass 0 if there is no sub-block.
+ * The address and value are hexadecimal numbers, leading 0x is optional.
+ * The mask is the instance mask; it is optional, and the default value is 0x1.
+ *
+ * For instance,
+ *
+ * .. code-block:: bash
+ *
+ * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ * echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ *
+ * How to check the result of the operation?
+ *
+ * To check disable/enable, see "ras" features at,
+ * /sys/class/drm/card[0/1/2...]/device/ras/features
+ *
+ * To check inject, see the corresponding error count at,
+ * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count
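+ *
+ * For example (assuming card0 and the umc block):
+ *
+ * .. code-block:: bash
+ *
+ * cat /sys/class/drm/card0/device/ras/features
+ * cat /sys/class/drm/card0/device/ras/umc_err_count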
+ *
+ * .. note::
+ * Operations are only allowed on blocks which are supported.
+ * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask
+ * to see which blocks support RAS on a particular asic.
+ *
+ */
+static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
+ const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct ras_debug_if data;
+ int ret = 0;
+
+ if (!amdgpu_ras_get_error_query_ready(adev)) {
+ dev_warn(adev->dev, "RAS WARN: error injection "
+ "currently inaccessible\n");
+ return size;
+ }
+
+ ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
+ if (ret)
+ return ret;
+
+ if (data.op == 3) {
+ ret = amdgpu_reserve_page_direct(adev, data.inject.address);
+ if (!ret)
+ return size;
+ else
+ return ret;
+ } else if (data.op == 4) {
+ ret = amdgpu_check_address_validity(adev, data.inject.address, data.inject.value);
+ return ret ? ret : size;
+ }
+
+ if (!amdgpu_ras_is_supported(adev, data.head.block))
+ return -EINVAL;
+
+ switch (data.op) {
+ case 0:
+ ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
+ break;
+ case 1:
+ ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
+ break;
+ case 2:
+ /* umc ce/ue error injection for a bad page is not allowed */
+ if (data.head.block == AMDGPU_RAS_BLOCK__UMC)
+ ret = amdgpu_ras_check_bad_page(adev, data.inject.address);
+ if (ret == -EINVAL) {
+ dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
+ data.inject.address);
+ break;
+ } else if (ret == 1) {
+ dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
+ data.inject.address);
+ break;
+ }
+
+ amdgpu_ras_instance_mask_check(adev, &data);
+
+ /* data.inject.address is an offset instead of an absolute gpu address */
+ ret = amdgpu_ras_error_inject(adev, &data.inject);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+/**
+ * DOC: AMDGPU RAS debugfs EEPROM table reset interface
+ *
+ * Some boards contain an EEPROM which is used to persistently store a list of
+ * bad pages which experience ECC errors in VRAM. This interface provides
+ * a way to reset the EEPROM, e.g., after testing error injection.
+ *
+ * Usage:
+ *
+ * .. code-block:: bash
+ *
+ * echo 1 > ../ras/ras_eeprom_reset
+ *
+ * will reset EEPROM table to 0 entries.
+ *
+ */
+static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
+ const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev =
+ (struct amdgpu_device *)file_inode(f)->i_private;
+ int ret;
+
+ ret = amdgpu_ras_eeprom_reset_table(
+ &(amdgpu_ras_get_context(adev)->eeprom_control));
+
+ if (!ret) {
+ /* Something was written to EEPROM.
+ */
+ amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
+ return size;
+ } else {
+ return ret;
+ }
+}
+
+static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_ras_debugfs_ctrl_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_ras_debugfs_eeprom_write,
+ .llseek = default_llseek
+};
+
+/**
+ * DOC: AMDGPU RAS sysfs Error Count Interface
+ *
+ * It allows the user to read the error count for each IP block on the gpu through
+ * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
+ *
+ * It outputs multiple lines which report the uncorrectable (ue) and correctable
+ * (ce) error counts.
+ *
+ * The format of one line is below,
+ *
+ * [ce|ue]: count
+ *
+ * Example:
+ *
+ * .. code-block:: bash
+ *
+ * ue: 0
+ * ce: 1
+ *
+ */
+static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ if (!amdgpu_ras_get_error_query_ready(obj->adev))
+ return sysfs_emit(buf, "Query currently inaccessible\n");
+
+ if (amdgpu_ras_query_error_status(obj->adev, &info))
+ return -EINVAL;
+
+ if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
+ amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
+ if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
+ dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
+ }
+
+ if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
+ return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+ "ce", info.ce_count, "de", info.de_count);
+ else
+ return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+ "ce", info.ce_count);
+}
+
+/* obj begin */
+
+#define get_obj(obj) do { (obj)->use++; } while (0)
+#define alive_obj(obj) ((obj)->use)
+
+static inline void put_obj(struct ras_manager *obj)
+{
+ if (obj && (--obj->use == 0)) {
+ list_del(&obj->node);
+ amdgpu_ras_error_data_fini(&obj->err_data);
+ }
+
+ if (obj && (obj->use < 0))
+ DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head));
+}
+
+/* make one obj and return it. */
+static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+
+ if (!adev->ras_enabled || !con)
+ return NULL;
+
+ if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
+ return NULL;
+
+ if (head->block == AMDGPU_RAS_BLOCK__MCA) {
+ if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
+ return NULL;
+
+ obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
+ } else
+ obj = &con->objs[head->block];
+
+ /* already exist. return obj? */
+ if (alive_obj(obj))
+ return NULL;
+
+ if (amdgpu_ras_error_data_init(&obj->err_data))
+ return NULL;
+
+ obj->head = *head;
+ obj->adev = adev;
+ list_add(&obj->node, &con->head);
+ get_obj(obj);
+
+ return obj;
+}
+
+/* return an obj equal to head, or the first when head is NULL */
+struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+ int i;
+
+ if (!adev->ras_enabled || !con)
+ return NULL;
+
+ if (head) {
+ if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
+ return NULL;
+
+ if (head->block == AMDGPU_RAS_BLOCK__MCA) {
+ if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
+ return NULL;
+
+ obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
+ } else
+ obj = &con->objs[head->block];
+
+ if (alive_obj(obj))
+ return obj;
+ } else {
+ for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
+ obj = &con->objs[i];
+ if (alive_obj(obj))
+ return obj;
+ }
+ }
+
+ return NULL;
+}
+/* obj end */
+
+/* feature ctl begin */
+static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ return adev->ras_hw_enabled & BIT(head->block);
+}
+
+static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ return con->features & BIT(head->block);
+}
+
+/*
+ * if obj is not created, then create one.
+ * set feature enable flag.
+ */
+static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
+ struct ras_common_if *head, int enable)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
+
+ /* If hardware does not support ras, then do not create obj.
+ * But if hardware supports ras, we can create the obj.
+ * The ras framework checks con->hw_supported to see if it needs to do
+ * corresponding initialization.
+ * The IP checks con->support to see if it needs to disable ras.
+ */
+ if (!amdgpu_ras_is_feature_allowed(adev, head))
+ return 0;
+
+ if (enable) {
+ if (!obj) {
+ obj = amdgpu_ras_create_obj(adev, head);
+ if (!obj)
+ return -EINVAL;
+ } else {
+ /* In case we create obj somewhere else */
+ get_obj(obj);
+ }
+ con->features |= BIT(head->block);
+ } else {
+ if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
+ con->features &= ~BIT(head->block);
+ put_obj(obj);
+ }
+ }
+
+ return 0;
+}
+
+/* wrapper of psp_ras_enable_features */
+int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ union ta_ras_cmd_input *info;
+ int ret;
+
+ if (!con)
+ return -EINVAL;
+
+ /* For non-gfx ip, do not enable ras feature if it is not allowed */
+ /* For gfx ip, regardless of feature support status, */
+ /* Force issue enable or disable ras feature commands */
+ if (head->block != AMDGPU_RAS_BLOCK__GFX &&
+ !amdgpu_ras_is_feature_allowed(adev, head))
+ return 0;
+
+ /* Only enable gfx ras feature from host side */
+ if (head->block == AMDGPU_RAS_BLOCK__GFX &&
+ !amdgpu_sriov_vf(adev) &&
+ !amdgpu_ras_intr_triggered()) {
+ info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ if (!enable) {
+ info->disable_features = (struct ta_ras_disable_features_input) {
+ .block_id = amdgpu_ras_block_to_ta(head->block),
+ .error_type = amdgpu_ras_error_to_ta(head->type),
+ };
+ } else {
+ info->enable_features = (struct ta_ras_enable_features_input) {
+ .block_id = amdgpu_ras_block_to_ta(head->block),
+ .error_type = amdgpu_ras_error_to_ta(head->type),
+ };
+ }
+
+ ret = psp_ras_enable_features(&adev->psp, info, enable);
+ if (ret) {
+ dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
+ enable ? "enable":"disable",
+ get_ras_block_str(head),
+ amdgpu_ras_is_poison_mode_supported(adev), ret);
+ kfree(info);
+ return ret;
+ }
+
+ kfree(info);
+ }
+
+ /* setup the obj */
+ __amdgpu_ras_feature_enable(adev, head, enable);
+
+ return 0;
+}
+
+/* Only used in device probe stage and called only once. */
+int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret;
+
+ if (!con)
+ return -EINVAL;
+
+ if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
+ if (enable) {
+ /* There is no harm in issuing a ras TA cmd regardless of
+ * the current ras state.
+ * If current state == target state, it will do nothing.
+ * But sometimes it requests the driver to reset and repost
+ * with error code -EAGAIN.
+ */
+ ret = amdgpu_ras_feature_enable(adev, head, 1);
+ /* With old ras TA, we might fail to enable ras.
+ * Log it and just set up the object.
+ * TODO: remove this WA in the future.
+ */
+ if (ret == -EINVAL) {
+ ret = __amdgpu_ras_feature_enable(adev, head, 1);
+ if (!ret)
+ dev_info(adev->dev,
+ "RAS INFO: %s setup object\n",
+ get_ras_block_str(head));
+ }
+ } else {
+ /* setup the object then issue a ras TA disable cmd.*/
+ ret = __amdgpu_ras_feature_enable(adev, head, 1);
+ if (ret)
+ return ret;
+
+ /* gfx block ras disable cmd must send to ras-ta */
+ if (head->block == AMDGPU_RAS_BLOCK__GFX)
+ con->features |= BIT(head->block);
+
+ ret = amdgpu_ras_feature_enable(adev, head, 0);
+
+ /* clean gfx block ras features flag */
+ if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
+ con->features &= ~BIT(head->block);
+ }
+ } else
+ ret = amdgpu_ras_feature_enable(adev, head, enable);
+
+ return ret;
+}
+
+static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
+ bool bypass)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ /* bypass psp.
+ * aka just release the obj and corresponding flags
+ */
+ if (bypass) {
+ if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
+ break;
+ } else {
+ if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
+ break;
+ }
+ }
+
+ return con->features;
+}
+
+static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
+ bool bypass)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int i;
+ const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
+
+ for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
+ struct ras_common_if head = {
+ .block = i,
+ .type = default_ras_type,
+ .sub_block_index = 0,
+ };
+
+ if (i == AMDGPU_RAS_BLOCK__MCA)
+ continue;
+
+ if (bypass) {
+ /*
+ * bypass psp. vbios enables ras for us,
+ * so just create the obj.
+ */
+ if (__amdgpu_ras_feature_enable(adev, &head, 1))
+ break;
+ } else {
+ if (amdgpu_ras_feature_enable(adev, &head, 1))
+ break;
+ }
+ }
+
+ for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
+ struct ras_common_if head = {
+ .block = AMDGPU_RAS_BLOCK__MCA,
+ .type = default_ras_type,
+ .sub_block_index = i,
+ };
+
+ if (bypass) {
+ /*
+ * bypass psp. vbios enables ras for us,
+ * so just create the obj.
+ */
+ if (__amdgpu_ras_feature_enable(adev, &head, 1))
+ break;
+ } else {
+ if (amdgpu_ras_feature_enable(adev, &head, 1))
+ break;
+ }
+ }
+
+ return con->features;
+}
+/* feature ctl end */
+
+static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj,
+ enum amdgpu_ras_block block)
+{
+ if (!block_obj)
+ return -EINVAL;
+
+ if (block_obj->ras_comm.block == block)
+ return 0;
+
+ return -EINVAL;
+}
+
+static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint32_t sub_block_index)
+{
+ struct amdgpu_ras_block_list *node, *tmp;
+ struct amdgpu_ras_block_object *obj;
+
+ if (block >= AMDGPU_RAS_BLOCK__LAST)
+ return NULL;
+
+ list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
+ if (!node->ras_obj) {
+ dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
+ continue;
+ }
+
+ obj = node->ras_obj;
+ if (obj->ras_block_match) {
+ if (obj->ras_block_match(obj, block, sub_block_index) == 0)
+ return obj;
+ } else {
+ if (amdgpu_ras_block_match_default(obj, block) == 0)
+ return obj;
+ }
+ }
+
+ return NULL;
+}
+
+static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int ret = 0;
+
+ /*
+ * choose the right query method according to
+ * whether the smu supports querying error information
+ */
+ ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
+ if (ret == -EOPNOTSUPP) {
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
+ } else if (!ret) {
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_count)
+ adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);
+
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_address)
+ adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
+ }
+}
+
+static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev,
+ struct ras_manager *ras_mgr,
+ struct ras_err_data *err_data,
+ struct ras_query_context *qctx,
+ const char *blk_name,
+ bool is_ue,
+ bool is_de)
+{
+ struct amdgpu_smuio_mcm_config_info *mcm_info;
+ struct ras_err_node *err_node;
+ struct ras_err_info *err_info;
+ u64 event_id = qctx->evid.event_id;
+
+ if (is_ue) {
+ for_each_ras_error(err_node, err_data) {
+ err_info = &err_node->err_info;
+ mcm_info = &err_info->mcm_info;
+ if (err_info->ue_count) {
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld new uncorrectable hardware errors detected in %s block\n",
+ mcm_info->socket_id,
+ mcm_info->die_id,
+ err_info->ue_count,
+ blk_name);
+ }
+ }
+
+ for_each_ras_error(err_node, &ras_mgr->err_data) {
+ err_info = &err_node->err_info;
+ mcm_info = &err_info->mcm_info;
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld uncorrectable hardware errors detected in total in %s block\n",
+ mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
+ }
+
+ } else {
+ if (is_de) {
+ for_each_ras_error(err_node, err_data) {
+ err_info = &err_node->err_info;
+ mcm_info = &err_info->mcm_info;
+ if (err_info->de_count) {
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld new deferred hardware errors detected in %s block\n",
+ mcm_info->socket_id,
+ mcm_info->die_id,
+ err_info->de_count,
+ blk_name);
+ }
+ }
+
+ for_each_ras_error(err_node, &ras_mgr->err_data) {
+ err_info = &err_node->err_info;
+ mcm_info = &err_info->mcm_info;
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld deferred hardware errors detected in total in %s block\n",
+ mcm_info->socket_id, mcm_info->die_id,
+ err_info->de_count, blk_name);
+ }
+ } else {
+ if (adev->debug_disable_ce_logs)
+ return;
+
+ for_each_ras_error(err_node, err_data) {
+ err_info = &err_node->err_info;
+ mcm_info = &err_info->mcm_info;
+ if (err_info->ce_count) {
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld new correctable hardware errors detected in %s block\n",
+ mcm_info->socket_id,
+ mcm_info->die_id,
+ err_info->ce_count,
+ blk_name);
+ }
+ }
+
+ for_each_ras_error(err_node, &ras_mgr->err_data) {
+ err_info = &err_node->err_info;
+ mcm_info = &err_info->mcm_info;
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d, "
+ "%lld correctable hardware errors detected in total in %s block\n",
+ mcm_info->socket_id, mcm_info->die_id,
+ err_info->ce_count, blk_name);
+ }
+ }
+ }
+}
+
+static inline bool err_data_has_source_info(struct ras_err_data *data)
+{
+ return !list_empty(&data->err_node_list);
+}
+
+static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev,
+ struct ras_query_if *query_if,
+ struct ras_err_data *err_data,
+ struct ras_query_context *qctx)
+{
+ struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
+ const char *blk_name = get_ras_block_str(&query_if->head);
+ u64 event_id = qctx->evid.event_id;
+
+ if (err_data->ce_count) {
+ if (err_data_has_source_info(err_data)) {
+ amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
+ blk_name, false, false);
+ } else if (!adev->aid_mask &&
+ adev->smuio.funcs &&
+ adev->smuio.funcs->get_socket_id &&
+ adev->smuio.funcs->get_die_id) {
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
+ "%ld correctable hardware errors "
+ "detected in %s block\n",
+ adev->smuio.funcs->get_socket_id(adev),
+ adev->smuio.funcs->get_die_id(adev),
+ ras_mgr->err_data.ce_count,
+ blk_name);
+ } else {
+ RAS_EVENT_LOG(adev, event_id, "%ld correctable hardware errors "
+ "detected in %s block\n",
+ ras_mgr->err_data.ce_count,
+ blk_name);
+ }
+ }
+
+ if (err_data->ue_count) {
+ if (err_data_has_source_info(err_data)) {
+ amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
+ blk_name, true, false);
+ } else if (!adev->aid_mask &&
+ adev->smuio.funcs &&
+ adev->smuio.funcs->get_socket_id &&
+ adev->smuio.funcs->get_die_id) {
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
+ "%ld uncorrectable hardware errors "
+ "detected in %s block\n",
+ adev->smuio.funcs->get_socket_id(adev),
+ adev->smuio.funcs->get_die_id(adev),
+ ras_mgr->err_data.ue_count,
+ blk_name);
+ } else {
+ RAS_EVENT_LOG(adev, event_id, "%ld uncorrectable hardware errors "
+ "detected in %s block\n",
+ ras_mgr->err_data.ue_count,
+ blk_name);
+ }
+ }
+
+ if (err_data->de_count) {
+ if (err_data_has_source_info(err_data)) {
+ amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data, qctx,
+ blk_name, false, true);
+ } else if (!adev->aid_mask &&
+ adev->smuio.funcs &&
+ adev->smuio.funcs->get_socket_id &&
+ adev->smuio.funcs->get_die_id) {
+ RAS_EVENT_LOG(adev, event_id, "socket: %d, die: %d "
+ "%ld deferred hardware errors "
+ "detected in %s block\n",
+ adev->smuio.funcs->get_socket_id(adev),
+ adev->smuio.funcs->get_die_id(adev),
+ ras_mgr->err_data.de_count,
+ blk_name);
+ } else {
+ RAS_EVENT_LOG(adev, event_id, "%ld deferred hardware errors "
+ "detected in %s block\n",
+ ras_mgr->err_data.de_count,
+ blk_name);
+ }
+ }
+}
+
+static void amdgpu_ras_virt_error_generate_report(struct amdgpu_device *adev,
+ struct ras_query_if *query_if,
+ struct ras_err_data *err_data,
+ struct ras_query_context *qctx)
+{
+ unsigned long new_ue, new_ce, new_de;
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &query_if->head);
+ const char *blk_name = get_ras_block_str(&query_if->head);
+ u64 event_id = qctx->evid.event_id;
+
+ new_ce = err_data->ce_count - obj->err_data.ce_count;
+ new_ue = err_data->ue_count - obj->err_data.ue_count;
+ new_de = err_data->de_count - obj->err_data.de_count;
+
+ if (new_ce) {
+ RAS_EVENT_LOG(adev, event_id, "%lu correctable hardware errors "
+ "detected in %s block\n",
+ new_ce,
+ blk_name);
+ }
+
+ if (new_ue) {
+ RAS_EVENT_LOG(adev, event_id, "%lu uncorrectable hardware errors "
+ "detected in %s block\n",
+ new_ue,
+ blk_name);
+ }
+
+ if (new_de) {
+ RAS_EVENT_LOG(adev, event_id, "%lu deferred hardware errors "
+ "detected in %s block\n",
+ new_de,
+ blk_name);
+ }
+}
+
+static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data)
+{
+ struct ras_err_node *err_node;
+ struct ras_err_info *err_info;
+
+ if (err_data_has_source_info(err_data)) {
+ for_each_ras_error(err_node, err_data) {
+ err_info = &err_node->err_info;
+ amdgpu_ras_error_statistic_de_count(&obj->err_data,
+ &err_info->mcm_info, err_info->de_count);
+ amdgpu_ras_error_statistic_ce_count(&obj->err_data,
+ &err_info->mcm_info, err_info->ce_count);
+ amdgpu_ras_error_statistic_ue_count(&obj->err_data,
+ &err_info->mcm_info, err_info->ue_count);
+ }
+ } else {
+ /* for legacy asic path which doesn't have error source info */
+ obj->err_data.ue_count += err_data->ue_count;
+ obj->err_data.ce_count += err_data->ce_count;
+ obj->err_data.de_count += err_data->de_count;
+ }
+}
+
+static void amdgpu_ras_mgr_virt_error_data_statistics_update(struct ras_manager *obj,
+ struct ras_err_data *err_data)
+{
+ /* Host reports absolute counts */
+ obj->err_data.ue_count = err_data->ue_count;
+ obj->err_data.ce_count = err_data->ce_count;
+ obj->err_data.de_count = err_data->de_count;
+}
+
+static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
+{
+ struct ras_common_if head;
+
+ memset(&head, 0, sizeof(head));
+ head.block = blk;
+
+ return amdgpu_ras_find_obj(adev, &head);
+}
+
+int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+ const struct aca_info *aca_info, void *data)
+{
+ struct ras_manager *obj;
+
+ /* in resume phase, no need to create aca fs node */
+ if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
+ return 0;
+
+ obj = get_ras_manager(adev, blk);
+ if (!obj)
+ return -EINVAL;
+
+ return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
+}
+
+int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk)
+{
+ struct ras_manager *obj;
+
+ obj = get_ras_manager(adev, blk);
+ if (!obj)
+ return -EINVAL;
+
+ amdgpu_aca_remove_handle(&obj->aca_handle);
+
+ return 0;
+}
+
+static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+ enum aca_error_type type, struct ras_err_data *err_data,
+ struct ras_query_context *qctx)
+{
+ struct ras_manager *obj;
+
+ obj = get_ras_manager(adev, blk);
+ if (!obj)
+ return -EINVAL;
+
+ return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data, qctx);
+}
+
+ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
+ struct aca_handle *handle, char *buf, void *data)
+{
+ struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle);
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ if (!amdgpu_ras_get_error_query_ready(obj->adev))
+ return sysfs_emit(buf, "Query currently inaccessible\n");
+
+ if (amdgpu_ras_query_error_status(obj->adev, &info))
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
+ "ce", info.ce_count, "de", info.de_count);
+}
+
+static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
+ struct ras_query_if *info,
+ struct ras_err_data *err_data,
+ struct ras_query_context *qctx,
+ unsigned int error_query_mode)
+{
+ enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
+ struct amdgpu_ras_block_object *block_obj = NULL;
+ int ret;
+
+ if (blk == AMDGPU_RAS_BLOCK_COUNT)
+ return -EINVAL;
+
+ if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
+ return -EINVAL;
+
+ if (error_query_mode == AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
+ return amdgpu_virt_req_ras_err_count(adev, blk, err_data);
+ } else if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
+ if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
+ amdgpu_ras_get_ecc_info(adev, err_data);
+ } else {
+ block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
+ if (!block_obj || !block_obj->hw_ops) {
+ dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
+ get_ras_block_str(&info->head));
+ return -EINVAL;
+ }
+
+ if (block_obj->hw_ops->query_ras_error_count)
+ block_obj->hw_ops->query_ras_error_count(adev, err_data);
+
+ if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
+ (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
+ (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
+ if (block_obj->hw_ops->query_ras_error_status)
+ block_obj->hw_ops->query_ras_error_status(adev);
+ }
+ }
+ } else {
+ if (amdgpu_aca_is_enabled(adev)) {
+ ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data, qctx);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data, qctx);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_DEFERRED, err_data, qctx);
+ if (ret)
+ return ret;
+ } else {
+ /* FIXME: add code to check return value later */
+ amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data, qctx);
+ amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data, qctx);
+ }
+ }
+
+ return 0;
+}
+
+/* query/inject/cure begin */
+static int amdgpu_ras_query_error_status_with_event(struct amdgpu_device *adev,
+ struct ras_query_if *info,
+ enum ras_event_type type)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ras_err_data err_data;
+ struct ras_query_context qctx;
+ unsigned int error_query_mode;
+ int ret;
+
+ if (!obj)
+ return -EINVAL;
+
+ ret = amdgpu_ras_error_data_init(&err_data);
+ if (ret)
+ return ret;
+
+ if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
+ return -EINVAL;
+
+ memset(&qctx, 0, sizeof(qctx));
+ qctx.evid.type = type;
+ qctx.evid.event_id = amdgpu_ras_acquire_event_id(adev, type);
+
+ if (!down_read_trylock(&adev->reset_domain->sem)) {
+ ret = -EIO;
+ goto out_fini_err_data;
+ }
+
+ ret = amdgpu_ras_query_error_status_helper(adev, info,
+ &err_data,
+ &qctx,
+ error_query_mode);
+ up_read(&adev->reset_domain->sem);
+ if (ret)
+ goto out_fini_err_data;
+
+ if (error_query_mode != AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY) {
+ amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
+ amdgpu_ras_error_generate_report(adev, info, &err_data, &qctx);
+ } else {
+ /* Host provides absolute error counts. First generate the report
+ * using the previous VF internal count against the new host count.
+ * Then update the VF internal count.
+ */
+ amdgpu_ras_virt_error_generate_report(adev, info, &err_data, &qctx);
+ amdgpu_ras_mgr_virt_error_data_statistics_update(obj, &err_data);
+ }
+
+ info->ue_count = obj->err_data.ue_count;
+ info->ce_count = obj->err_data.ce_count;
+ info->de_count = obj->err_data.de_count;
+
+out_fini_err_data:
+ amdgpu_ras_error_data_fini(&err_data);
+
+ return ret;
+}
+
+int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
+{
+ return amdgpu_ras_query_error_status_with_event(adev, info, RAS_EVENT_TYPE_INVALID);
+}
+
+int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
+{
+ struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
+ const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+ const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
+
+ if (!block_obj || !block_obj->hw_ops) {
+ dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
+ ras_block_str(block));
+ return -EOPNOTSUPP;
+ }
+
+ if (!amdgpu_ras_is_supported(adev, block) ||
+ !amdgpu_ras_get_aca_debug_mode(adev))
+ return -EOPNOTSUPP;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EOPNOTSUPP;
+
+ /* skip ras error reset in gpu reset */
+ if ((amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) &&
+ ((smu_funcs && smu_funcs->set_debug_mode) ||
+ (mca_funcs && mca_funcs->mca_set_debug_mode)))
+ return -EOPNOTSUPP;
+
+ if (block_obj->hw_ops->reset_ras_error_count)
+ block_obj->hw_ops->reset_ras_error_count(adev);
+
+ return 0;
+}
+
+int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
+{
+ struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
+
+ if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP)
+ return 0;
+
+ if ((block == AMDGPU_RAS_BLOCK__GFX) ||
+ (block == AMDGPU_RAS_BLOCK__MMHUB)) {
+ if (block_obj->hw_ops->reset_ras_error_status)
+ block_obj->hw_ops->reset_ras_error_status(adev);
+ }
+
+ return 0;
+}
+
+/* wrapper of psp_ras_trigger_error */
+int amdgpu_ras_error_inject(struct amdgpu_device *adev,
+ struct ras_inject_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ta_ras_trigger_error_input block_info = {
+ .block_id = amdgpu_ras_block_to_ta(info->head.block),
+ .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
+ .sub_block_index = info->head.sub_block_index,
+ .address = info->address,
+ .value = info->value,
+ };
+ int ret = -EINVAL;
+ struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
+ info->head.block,
+ info->head.sub_block_index);
+
+ /* inject on guest isn't allowed, return success directly */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ if (!obj)
+ return -EINVAL;
+
+ if (!block_obj || !block_obj->hw_ops) {
+ dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
+ get_ras_block_str(&info->head));
+ return -EINVAL;
+ }
+
+ /* Calculate XGMI relative offset */
+ if (adev->gmc.xgmi.num_physical_nodes > 1 &&
+ info->head.block != AMDGPU_RAS_BLOCK__GFX) {
+ block_info.address =
+ amdgpu_xgmi_get_relative_phy_addr(adev,
+ block_info.address);
+ }
+
+ if (block_obj->hw_ops->ras_error_inject) {
+ if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
+ ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
+ else /* Special ras_error_inject is defined (e.g., xgmi) */
+ ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
+ info->instance_mask);
+ } else {
+ /* default path */
+ ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
+ }
+
+ if (ret)
+ dev_err(adev->dev, "ras inject %s failed %d\n",
+ get_ras_block_str(&info->head), ret);
+
+ return ret;
+}
+
+/**
+ * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
+ * @adev: pointer to AMD GPU device
+ * @ce_count: pointer to an integer to be set to the count of correctable errors.
+ * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
+ * @query_info: pointer to ras_query_if
+ *
+ * Return 0 on query success or if there is nothing to do; otherwise return
+ * an error on failure.
+ */
+static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
+ unsigned long *ce_count,
+ unsigned long *ue_count,
+ struct ras_query_if *query_info)
+{
+ int ret;
+
+ if (!query_info)
+ /* do nothing if query_info is not specified */
+ return 0;
+
+ ret = amdgpu_ras_query_error_status(adev, query_info);
+ if (ret)
+ return ret;
+
+ *ce_count += query_info->ce_count;
+ *ue_count += query_info->ue_count;
+
+ /* some hardware/IPs support read-to-clear, so there is
+ * no need to explicitly reset the err status after the query call */
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
+ amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
+ if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
+ dev_warn(adev->dev,
+ "Failed to reset error counter and error status\n");
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
+ * @adev: pointer to AMD GPU device
+ * @ce_count: pointer to an integer to be set to the count of correctable errors.
+ * @ue_count: pointer to an integer to be set to the count of uncorrectable
+ * errors.
+ * @query_info: pointer to ras_query_if if the query request is only for a
+ * specific ip block; if info is NULL, then the query request is for
+ * all the ip blocks that support querying ras error counters/status
+ *
+ * If @ce_count or @ue_count is set, count and return the corresponding
+ * error counts in those integer pointers. Return 0 if the device
+ * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
+ */
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ unsigned long *ce_count,
+ unsigned long *ue_count,
+ struct ras_query_if *query_info)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+ unsigned long ce, ue;
+ int ret;
+
+ if (!adev->ras_enabled || !con)
+ return -EOPNOTSUPP;
+
+ /* Don't count since no reporting.
+ */
+ if (!ce_count && !ue_count)
+ return 0;
+
+ ce = 0;
+ ue = 0;
+ if (!query_info) {
+ /* query all the ip blocks that support ras query interface */
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
+ }
+ } else {
+ /* query specific ip block */
+ ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
+ }
+
+ if (ret)
+ return ret;
+
+ if (ce_count)
+ *ce_count = ce;
+
+ if (ue_count)
+ *ue_count = ue;
+
+ return 0;
+}
+/* query/inject/cure end */
+
+
+/* sysfs begin */
+
+static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
+ struct ras_badpage **bps, unsigned int *count);
+
+static char *amdgpu_ras_badpage_flags_str(unsigned int flags)
+{
+ switch (flags) {
+ case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
+ return "R";
+ case AMDGPU_RAS_RETIRE_PAGE_PENDING:
+ return "P";
+ case AMDGPU_RAS_RETIRE_PAGE_FAULT:
+ default:
+ return "F";
+ }
+}
+
+/**
+ * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
+ *
+ * It allows the user to read the bad pages of vram on the gpu through
+ * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
+ *
+ * It outputs multiple lines, and each line stands for one gpu page.
+ *
+ * The format of one line is below,
+ * gpu pfn : gpu page size : flags
+ *
+ * gpu pfn and gpu page size are printed in hex format.
+ * flags can be one of the characters below,
+ *
+ * R: reserved, this gpu page is reserved and not able to be used.
+ *
+ * P: pending for reserve, this gpu page is marked as bad and will be reserved
+ * in the next window of page_reserve.
+ *
+ * F: unable to reserve. this gpu page can't be reserved for some reason.
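+ *
+ * The file can be read directly, for example (assuming card0):
+ *
+ * .. code-block:: bash
+ *
+ * cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages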
+ *
+ * Example output:
+ *
+ * .. code-block:: bash
+ *
+ * 0x00000001 : 0x00001000 : R
+ * 0x00000002 : 0x00001000 : P
+ *
+ */
+
+static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f,
+ struct kobject *kobj, const struct bin_attribute *attr,
+ char *buf, loff_t ppos, size_t count)
+{
+ struct amdgpu_ras *con =
+ container_of(attr, struct amdgpu_ras, badpages_attr);
+ struct amdgpu_device *adev = con->adev;
+ const unsigned int element_size =
+ sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
+ unsigned int start = div64_ul(ppos + element_size - 1, element_size);
+ unsigned int end = div64_ul(ppos + count - 1, element_size);
+ ssize_t s = 0;
+ struct ras_badpage *bps = NULL;
+ unsigned int bps_count = 0;
+
+ memset(buf, 0, count);
+
+ if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
+ return 0;
+
+ for (; start < end && start < bps_count; start++)
+ s += scnprintf(&buf[s], element_size + 1,
+ "0x%08x : 0x%08x : %1s\n",
+ bps[start].bp,
+ bps[start].size,
+ amdgpu_ras_badpage_flags_str(bps[start].flags));
+
+ kfree(bps);
+
+ return s;
+}
+
+static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct amdgpu_ras *con =
+ container_of(attr, struct amdgpu_ras, features_attr);
+
+ return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
+}
+
+static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct amdgpu_ras *con =
+ container_of(attr, struct amdgpu_ras, version_attr);
+ return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
+}
+
+static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct amdgpu_ras *con =
+ container_of(attr, struct amdgpu_ras, schema_attr);
+ return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
+}
+
+static struct {
+ enum ras_event_type type;
+ const char *name;
+} dump_event[] = {
+ {RAS_EVENT_TYPE_FATAL, "Fatal Error"},
+ {RAS_EVENT_TYPE_POISON_CREATION, "Poison Creation"},
+ {RAS_EVENT_TYPE_POISON_CONSUMPTION, "Poison Consumption"},
+};
+
+static ssize_t amdgpu_ras_sysfs_event_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct amdgpu_ras *con =
+ container_of(attr, struct amdgpu_ras, event_state_attr);
+ struct ras_event_manager *event_mgr = con->event_mgr;
+ struct ras_event_state *event_state;
+ int i, size = 0;
+
+ if (!event_mgr)
+ return -EINVAL;
+
+ size += sysfs_emit_at(buf, size, "current seqno: %llu\n", atomic64_read(&event_mgr->seqno));
+ for (i = 0; i < ARRAY_SIZE(dump_event); i++) {
+ event_state = &event_mgr->event_state[dump_event[i].type];
+ size += sysfs_emit_at(buf, size, "%s: count:%llu, last_seqno:%llu\n",
+ dump_event[i].name,
+ atomic64_read(&event_state->count),
+ event_state->last_seqno);
+ }
+
+ return (ssize_t)size;
+}
+
+static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (adev->dev->kobj.sd)
+ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &con->badpages_attr.attr,
+ RAS_FS_NAME);
+}
+
+static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct attribute *attrs[] = {
+ &con->features_attr.attr,
+ &con->version_attr.attr,
+ &con->schema_attr.attr,
+ &con->event_state_attr.attr,
+ NULL
+ };
+ struct attribute_group group = {
+ .name = RAS_FS_NAME,
+ .attrs = attrs,
+ };
+
+ if (adev->dev->kobj.sd)
+ sysfs_remove_group(&adev->dev->kobj, &group);
+
+ return 0;
+}
+
+int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
+
+ if (amdgpu_aca_is_enabled(adev))
+ return 0;
+
+ if (!obj || obj->attr_inuse)
+ return -EINVAL;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_virt_ras_telemetry_block_en(adev, head->block))
+ return 0;
+
+ get_obj(obj);
+
+ snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
+ "%s_err_count", head->name);
+
+ obj->sysfs_attr = (struct device_attribute){
+ .attr = {
+ .name = obj->fs_data.sysfs_name,
+ .mode = S_IRUGO,
+ },
+ .show = amdgpu_ras_sysfs_read,
+ };
+ sysfs_attr_init(&obj->sysfs_attr.attr);
+
+ if (sysfs_add_file_to_group(&adev->dev->kobj,
+ &obj->sysfs_attr.attr,
+ RAS_FS_NAME)) {
+ put_obj(obj);
+ return -EINVAL;
+ }
+
+ obj->attr_inuse = 1;
+
+ return 0;
+}
+
+int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
+
+ if (amdgpu_aca_is_enabled(adev))
+ return 0;
+
+ if (!obj || !obj->attr_inuse)
+ return -EINVAL;
+
+ if (adev->dev->kobj.sd)
+ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &obj->sysfs_attr.attr,
+ RAS_FS_NAME);
+ obj->attr_inuse = 0;
+ put_obj(obj);
+
+ return 0;
+}
+
+static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ amdgpu_ras_sysfs_remove(adev, &obj->head);
+ }
+
+ if (amdgpu_bad_page_threshold != 0)
+ amdgpu_ras_sysfs_remove_bad_page_node(adev);
+
+ amdgpu_ras_sysfs_remove_dev_attr_node(adev);
+
+ return 0;
+}
+/* sysfs end */
+
+/**
+ * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
+ *
+ * Normally when there is an uncorrectable error, the driver will reset
+ * the GPU to recover. However, in the event of an unrecoverable error,
+ * the driver provides an interface to reboot the system automatically
+ * in that event.
+ *
+ * The following file in debugfs provides that interface:
+ * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
+ *
+ * Usage:
+ *
+ * .. code-block:: bash
+ *
+ * echo true > .../ras/auto_reboot
+ *
+ */
+/* debugfs begin */
+static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *dir;
+
+ dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
+ debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
+ &amdgpu_ras_debugfs_ctrl_ops);
+ debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
+ &amdgpu_ras_debugfs_eeprom_ops);
+ debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
+ &con->bad_page_cnt_threshold);
+ debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
+ debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
+ debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
+ debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
+ &amdgpu_ras_debugfs_eeprom_size_ops);
+ con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
+ S_IRUGO, dir, adev,
+ &amdgpu_ras_debugfs_eeprom_table_ops);
+ amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
+
+ /*
+ * After an uncorrectable error happens, GPU recovery will usually
+ * be scheduled. But due to the known problem of GPU recovery failing
+ * to bring the GPU back, the interface below provides a direct way
+ * for the user to reboot the system automatically in such a case when
+ * an ERREVENT_ATHUB_INTERRUPT is generated. The normal GPU recovery
+ * routine will never be called.
+ */
+ debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
+
+ /*
+ * The user can set this so that the hardware's error count registers
+ * of RAS IPs are not cleaned up during ras recovery.
+ */
+ debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
+ &con->disable_ras_err_cnt_harvest);
+ return dir;
+}
+
+static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
+ struct ras_fs_if *head,
+ struct dentry *dir)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
+
+ if (!obj || !dir)
+ return;
+
+ get_obj(obj);
+
+ memcpy(obj->fs_data.debugfs_name,
+ head->debugfs_name,
+ sizeof(obj->fs_data.debugfs_name));
+
+ debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
+ obj, &amdgpu_ras_debugfs_ops);
+}
+
+static bool amdgpu_ras_aca_is_supported(struct amdgpu_device *adev)
+{
+ bool ret;
+
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ ret = true;
+ break;
+ default:
+ ret = false;
+ break;
+ }
+
+ return ret;
+}
+
+void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct dentry *dir;
+ struct ras_manager *obj;
+ struct ras_fs_if fs_info;
+
+	/*
+	 * this function is not called in the resume path, so there is no
+	 * need to check suspend and gpu reset status
+	 */
+ if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
+ return;
+
+ dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
+
+ list_for_each_entry(obj, &con->head, node) {
+ if (amdgpu_ras_is_supported(adev, obj->head.block) &&
+ (obj->attr_inuse == 1)) {
+ sprintf(fs_info.debugfs_name, "%s_err_inject",
+ get_ras_block_str(&obj->head));
+ fs_info.head = obj->head;
+ amdgpu_ras_debugfs_create(adev, &fs_info, dir);
+ }
+ }
+
+ if (amdgpu_ras_aca_is_supported(adev)) {
+ if (amdgpu_aca_is_enabled(adev))
+ amdgpu_aca_smu_debugfs_init(adev, dir);
+ else
+ amdgpu_mca_smu_debugfs_init(adev, dir);
+ }
+}
+
+/* debugfs end */
+
+/* ras fs */
+static const BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
+ amdgpu_ras_sysfs_badpages_read, NULL, 0);
+static DEVICE_ATTR(features, S_IRUGO,
+ amdgpu_ras_sysfs_features_read, NULL);
+static DEVICE_ATTR(version, 0444,
+ amdgpu_ras_sysfs_version_show, NULL);
+static DEVICE_ATTR(schema, 0444,
+ amdgpu_ras_sysfs_schema_show, NULL);
+static DEVICE_ATTR(event_state, 0444,
+ amdgpu_ras_sysfs_event_state_show, NULL);
+static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct attribute_group group = {
+ .name = RAS_FS_NAME,
+ };
+ struct attribute *attrs[] = {
+ &con->features_attr.attr,
+ &con->version_attr.attr,
+ &con->schema_attr.attr,
+ &con->event_state_attr.attr,
+ NULL
+ };
+ const struct bin_attribute *bin_attrs[] = {
+ NULL,
+ NULL,
+ };
+ int r;
+
+ group.attrs = attrs;
+
+ /* add features entry */
+ con->features_attr = dev_attr_features;
+ sysfs_attr_init(attrs[0]);
+
+ /* add version entry */
+ con->version_attr = dev_attr_version;
+ sysfs_attr_init(attrs[1]);
+
+ /* add schema entry */
+ con->schema_attr = dev_attr_schema;
+ sysfs_attr_init(attrs[2]);
+
+ /* add event_state entry */
+ con->event_state_attr = dev_attr_event_state;
+ sysfs_attr_init(attrs[3]);
+
+ if (amdgpu_bad_page_threshold != 0) {
+ /* add bad_page_features entry */
+ con->badpages_attr = bin_attr_gpu_vram_bad_pages;
+ sysfs_bin_attr_init(&con->badpages_attr);
+ bin_attrs[0] = &con->badpages_attr;
+ group.bin_attrs = bin_attrs;
+ }
+
+ r = sysfs_create_group(&adev->dev->kobj, &group);
+ if (r)
+ dev_err(adev->dev, "Failed to create RAS sysfs group!");
+
+ return 0;
+}
+
+static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *con_obj, *ip_obj, *tmp;
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
+ ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
+ if (ip_obj)
+ put_obj(ip_obj);
+ }
+ }
+
+ amdgpu_ras_sysfs_remove_all(adev);
+ return 0;
+}
+/* ras fs end */
+
+/* ih begin */
+
+/* For hardware that cannot enable the bif ring for both the ras_controller_irq
+ * and ras_err_event_athub_irq ih cookies, the driver has to poll the status
+ * register to check whether the interrupt has been triggered, and properly
+ * ack the interrupt if it is there.
+ */
+void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
+{
+ /* Fatal error events are handled on host side */
+ if (amdgpu_sriov_vf(adev))
+ return;
+ /*
+ * If the current interrupt is caused by a non-fatal RAS error, skip
+ * check for fatal error. For fatal errors, FED status of all devices
+ * in XGMI hive gets set when the first device gets fatal error
+ * interrupt. The error gets propagated to other devices as well, so
+ * make sure to ack the interrupt regardless of FED status.
+ */
+ if (!amdgpu_ras_get_fed_status(adev) &&
+ amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
+ return;
+
+ if (adev->nbio.ras &&
+ adev->nbio.ras->handle_ras_controller_intr_no_bifring)
+ adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);
+
+ if (adev->nbio.ras &&
+ adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
+ adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
+}
+
+static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj,
+ struct amdgpu_iv_entry *entry)
+{
+ bool poison_stat = false;
+ struct amdgpu_device *adev = obj->adev;
+ struct amdgpu_ras_block_object *block_obj =
+ amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
+ u64 event_id;
+ int ret;
+
+ if (!block_obj || !con)
+ return;
+
+ ret = amdgpu_ras_mark_ras_event(adev, type);
+ if (ret)
+ return;
+
+ amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block);
+	/* both query_poison_status and handle_poison_consumption are optional,
+	 * but at least one of them should be implemented if we need a poison
+	 * consumption handler
+	 */
+ if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
+ poison_stat = block_obj->hw_ops->query_poison_status(adev);
+ if (!poison_stat) {
+ /* Not poison consumption interrupt, no need to handle it */
+ dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
+ block_obj->ras_comm.name);
+
+ return;
+ }
+ }
+
+ amdgpu_umc_poison_handler(adev, obj->head.block, 0);
+
+ if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
+ poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
+
+	/* gpu reset is the fallback for the failed and default cases.
+	 * For the RMA case, amdgpu_umc_poison_handler will handle the gpu reset.
+	 */
+ if (poison_stat && !amdgpu_ras_is_rma(adev)) {
+ event_id = amdgpu_ras_acquire_event_id(adev, type);
+ RAS_EVENT_LOG(adev, event_id,
+ "GPU reset for %s RAS poison consumption is issued!\n",
+ block_obj->ras_comm.name);
+ amdgpu_ras_reset_gpu(adev);
+ }
+
+ if (!poison_stat)
+ amdgpu_gfx_poison_consumption_handler(adev, entry);
+}
+
+static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj,
+ struct amdgpu_iv_entry *entry)
+{
+ struct amdgpu_device *adev = obj->adev;
+ enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
+ u64 event_id;
+ int ret;
+
+ ret = amdgpu_ras_mark_ras_event(adev, type);
+ if (ret)
+ return;
+
+ event_id = amdgpu_ras_acquire_event_id(adev, type);
+ RAS_EVENT_LOG(adev, event_id, "Poison is created\n");
+
+ if (amdgpu_ip_version(obj->adev, UMC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
+ struct amdgpu_ras *con = amdgpu_ras_get_context(obj->adev);
+
+ atomic_inc(&con->page_retirement_req_cnt);
+ atomic_inc(&con->poison_creation_count);
+
+ wake_up(&con->page_retirement_wq);
+ }
+}
+
+static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_ih_data *data = &obj->ih_data;
+ struct ras_err_data err_data;
+ int ret;
+
+ if (!data->cb)
+ return;
+
+ ret = amdgpu_ras_error_data_init(&err_data);
+ if (ret)
+ return;
+
+	/* Let the IP handle its data; maybe we need to get the output
+	 * from the callback to update the error type/count, etc.
+	 */
+ amdgpu_ras_set_fed(obj->adev, true);
+ ret = data->cb(obj->adev, &err_data, entry);
+	/* A UE will trigger an interrupt, and in that case
+	 * we need to do a reset to recover the whole system.
+	 * But let the IP do that recovery; here we just dispatch
+	 * the error.
+	 */
+ if (ret == AMDGPU_RAS_SUCCESS) {
+		/* these counts could be left as 0 if
+		 * some blocks do not count error numbers
+		 */
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+ obj->err_data.de_count += err_data.de_count;
+ }
+
+ amdgpu_ras_error_data_fini(&err_data);
+}
+
+static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
+{
+ struct ras_ih_data *data = &obj->ih_data;
+ struct amdgpu_iv_entry entry;
+
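+	/* Drain the per-object IH ring: copy each entry at rptr, then advance
+	 * rptr by the aligned element size. The barriers pair with the wmb()
+	 * in amdgpu_ras_interrupt_dispatch(), which publishes new entries.
+	 */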
+ while (data->rptr != data->wptr) {
+ rmb();
+ memcpy(&entry, &data->ring[data->rptr],
+ data->element_size);
+
+ wmb();
+ data->rptr = (data->aligned_element_size +
+ data->rptr) % data->ring_size;
+
+ if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
+ if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
+ amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
+ else
+ amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
+ } else {
+ if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
+ amdgpu_ras_interrupt_umc_handler(obj, &entry);
+ else
+ dev_warn(obj->adev->dev,
+ "No RAS interrupt handler for non-UMC block with poison disabled.\n");
+ }
+ }
+}
+
+static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
+{
+ struct ras_ih_data *data =
+ container_of(work, struct ras_ih_data, ih_work);
+ struct ras_manager *obj =
+ container_of(data, struct ras_manager, ih_data);
+
+ amdgpu_ras_interrupt_handler(obj);
+}
+
+int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
+ struct ras_dispatch_if *info)
+{
+ struct ras_manager *obj;
+ struct ras_ih_data *data;
+
+ obj = amdgpu_ras_find_obj(adev, &info->head);
+ if (!obj)
+ return -EINVAL;
+
+ data = &obj->ih_data;
+
+ if (data->inuse == 0)
+ return 0;
+
+ /* Might be overflow... */
+ memcpy(&data->ring[data->wptr], info->entry,
+ data->element_size);
+
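+	/* Make sure the entry is fully written before the new wptr becomes
+	 * visible to the consumer in amdgpu_ras_interrupt_handler().
+	 */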
+ wmb();
+ data->wptr = (data->aligned_element_size +
+ data->wptr) % data->ring_size;
+
+ schedule_work(&data->ih_work);
+
+ return 0;
+}
+
+int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
+ struct ras_ih_data *data;
+
+ if (!obj)
+ return -EINVAL;
+
+ data = &obj->ih_data;
+ if (data->inuse == 0)
+ return 0;
+
+ cancel_work_sync(&data->ih_work);
+
+ kfree(data->ring);
+ memset(data, 0, sizeof(*data));
+ put_obj(obj);
+
+ return 0;
+}
+
+int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
+ struct ras_ih_data *data;
+ struct amdgpu_ras_block_object *ras_obj;
+
+ if (!obj) {
+		/* in case we register the IH before enabling the ras feature */
+ obj = amdgpu_ras_create_obj(adev, head);
+ if (!obj)
+ return -EINVAL;
+ } else
+ get_obj(obj);
+
+ ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm);
+
+ data = &obj->ih_data;
+	/* add the callback, etc. */
+ *data = (struct ras_ih_data) {
+ .inuse = 0,
+ .cb = ras_obj->ras_cb,
+ .element_size = sizeof(struct amdgpu_iv_entry),
+ .rptr = 0,
+ .wptr = 0,
+ };
+
+ INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
+
+ data->aligned_element_size = ALIGN(data->element_size, 8);
+ /* the ring can store 64 iv entries. */
+ data->ring_size = 64 * data->aligned_element_size;
+ data->ring = kmalloc(data->ring_size, GFP_KERNEL);
+ if (!data->ring) {
+ put_obj(obj);
+ return -ENOMEM;
+ }
+
+ /* IH is ready */
+ data->inuse = 1;
+
+ return 0;
+}
+
+static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
+ }
+
+ return 0;
+}
+/* ih end */
+
+/* traverse all IPs except NBIO to query error counters */
+static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev, enum ras_event_type type)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+
+ if (!adev->ras_enabled || !con)
+ return;
+
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+		/*
+		 * The PCIE_BIF IP has its own isr for the ras controller
+		 * interrupt, and the specific ras counter query is done
+		 * in that isr. So skip this block in the common sync
+		 * flood interrupt isr path.
+		 */
+ if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
+ continue;
+
+		/*
+		 * this is a workaround for aldebaran: skip sending the msg to
+		 * smu to get the ecc_info table, because smu currently fails
+		 * to handle that request.
+		 * should be removed once smu can handle the ecc_info table.
+		 */
+ if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
+ IP_VERSION(13, 0, 2)))
+ continue;
+
+ amdgpu_ras_query_error_status_with_event(adev, &info, type);
+
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
+ IP_VERSION(11, 0, 2) &&
+ amdgpu_ip_version(adev, MP0_HWIP, 0) !=
+ IP_VERSION(11, 0, 4) &&
+ amdgpu_ip_version(adev, MP0_HWIP, 0) !=
+ IP_VERSION(13, 0, 0)) {
+ if (amdgpu_ras_reset_error_status(adev, info.head.block))
+ dev_warn(adev->dev, "Failed to reset error counter and error status");
+ }
+ }
+}
+
+/* Parse RdRspStatus and WrRspStatus */
+static void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
+ struct ras_query_if *info)
+{
+ struct amdgpu_ras_block_object *block_obj;
+	/*
+	 * Only two blocks need to query read/write
+	 * RspStatus at the current state
+	 */
+ if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) &&
+ (info->head.block != AMDGPU_RAS_BLOCK__MMHUB))
+ return;
+
+ block_obj = amdgpu_ras_get_ras_block(adev,
+ info->head.block,
+ info->head.sub_block_index);
+
+ if (!block_obj || !block_obj->hw_ops) {
+ dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
+ get_ras_block_str(&info->head));
+ return;
+ }
+
+ if (block_obj->hw_ops->query_ras_error_status)
+ block_obj->hw_ops->query_ras_error_status(adev);
+
+}
+
+static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+
+ if (!adev->ras_enabled || !con)
+ return;
+
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ amdgpu_ras_error_status_query(adev, &info);
+ }
+}
+
+/* recovery begin */
+
+/* return 0 on success.
+ * the caller needs to free bps.
+ */
+static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
+ struct ras_badpage **bps, unsigned int *count)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ int i = 0;
+ int ret = 0, status;
+
+ if (!con || !con->eh_data || !bps || !count)
+ return -EINVAL;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data || data->count == 0) {
+ *bps = NULL;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *bps = kmalloc_array(data->count, sizeof(struct ras_badpage), GFP_KERNEL);
+ if (!*bps) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
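+	/* Walk every cached bad-page record: entries without a timestamp are
+	 * skipped, and the VRAM manager reservation status is translated into
+	 * the AMDGPU_RAS_RETIRE_PAGE_* flags reported to userspace.
+	 */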
+ for (; i < data->count; i++) {
+ if (!data->bps[i].ts)
+ continue;
+
+ (*bps)[i] = (struct ras_badpage){
+ .bp = data->bps[i].retired_page,
+ .size = AMDGPU_GPU_PAGE_SIZE,
+ .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
+ };
+
+ if (amdgpu_ras_check_critical_address(adev,
+ data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ continue;
+
+ status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
+ data->bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT);
+ if (status == -EBUSY)
+ (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
+ else if (status == -ENOENT)
+ (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
+ }
+
+ *count = con->bad_page_num;
+out:
+ mutex_unlock(&con->recovery_lock);
+ return ret;
+}
+
+static void amdgpu_ras_set_fed_all(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive, bool status)
+{
+ struct amdgpu_device *tmp_adev;
+
+ if (hive) {
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ amdgpu_ras_set_fed(tmp_adev, status);
+ } else {
+ amdgpu_ras_set_fed(adev, status);
+ }
+}
+
+bool amdgpu_ras_in_recovery(struct amdgpu_device *adev)
+{
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int hive_ras_recovery = 0;
+
+ if (hive) {
+ hive_ras_recovery = atomic_read(&hive->ras_recovery);
+ amdgpu_put_xgmi_hive(hive);
+ }
+
+ if (ras && (atomic_read(&ras->in_recovery) || hive_ras_recovery))
+ return true;
+
+ return false;
+}
+
+static enum ras_event_type amdgpu_ras_get_fatal_error_event(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_intr_triggered())
+ return RAS_EVENT_TYPE_FATAL;
+ else
+ return RAS_EVENT_TYPE_POISON_CONSUMPTION;
+}
+
+static void amdgpu_ras_do_recovery(struct work_struct *work)
+{
+ struct amdgpu_ras *ras =
+ container_of(work, struct amdgpu_ras, recovery_work);
+ struct amdgpu_device *remote_adev = NULL;
+ struct amdgpu_device *adev = ras->adev;
+ struct list_head device_list, *device_list_handle = NULL;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ unsigned int error_query_mode;
+ enum ras_event_type type;
+
+ if (hive) {
+ atomic_set(&hive->ras_recovery, 1);
+
+		/* If any device in the hive received a RAS fatal error
+		 * interrupt, set fatal error status on all of them. This
+		 * condition requires a recovery, and the flag will be cleared
+		 * as part of that recovery.
+		 */
+ list_for_each_entry(remote_adev, &hive->device_list,
+ gmc.xgmi.head)
+ if (amdgpu_ras_get_fed_status(remote_adev)) {
+ amdgpu_ras_set_fed_all(adev, hive, true);
+ break;
+ }
+ }
+ if (!ras->disable_ras_err_cnt_harvest) {
+
+ /* Build list of devices to query RAS related errors */
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
+ device_list_handle = &hive->device_list;
+ } else {
+ INIT_LIST_HEAD(&device_list);
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ device_list_handle = &device_list;
+ }
+
+ if (amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
+ if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY) {
+				/* wait 500ms to ensure pmfw has finished polling mca bank info */
+ msleep(500);
+ }
+ }
+
+ type = amdgpu_ras_get_fatal_error_event(adev);
+ list_for_each_entry(remote_adev,
+ device_list_handle, gmc.xgmi.head) {
+ amdgpu_ras_query_err_status(remote_adev);
+ amdgpu_ras_log_on_err_counter(remote_adev, type);
+ }
+
+ }
+
+ if (amdgpu_device_should_recover_gpu(ras->adev)) {
+ struct amdgpu_reset_context reset_context;
+ memset(&reset_context, 0, sizeof(reset_context));
+
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ reset_context.src = AMDGPU_RESET_SRC_RAS;
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+
+ /* Perform full reset in fatal error mode */
+ if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ else {
+ clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
+ if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) {
+ ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ reset_context.method = AMD_RESET_METHOD_MODE2;
+ }
+
+			/* If a fatal error occurs in poison mode, mode1 reset is
+			 * used to recover the gpu.
+			 */
+ if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) {
+ ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
+ psp_fatal_error_recovery_quirk(&adev->psp);
+ }
+ }
+
+ amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
+ }
+ atomic_set(&ras->in_recovery, 0);
+ if (hive) {
+ atomic_set(&hive->ras_recovery, 0);
+ amdgpu_put_xgmi_hive(hive);
+ }
+}
+
+/* alloc/realloc bps array */
+static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
+ struct ras_err_handler_data *data, int pages)
+{
+ unsigned int old_space = data->count + data->space_left;
+ unsigned int new_space = old_space + pages;
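+	/* round the new capacity up to a multiple of 512 records so that
+	 * repeated small retirements do not trigger a reallocation each time
+	 */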
+ unsigned int align_space = ALIGN(new_space, 512);
+ void *bps = kmalloc_array(align_space, sizeof(*data->bps), GFP_KERNEL);
+
+ if (!bps) {
+ return -ENOMEM;
+ }
+
+ if (data->bps) {
+ memcpy(bps, data->bps,
+ data->count * sizeof(*data->bps));
+ kfree(data->bps);
+ }
+
+ data->bps = bps;
+ data->space_left += align_space - old_space;
+ return 0;
+}
+
+static int amdgpu_ras_mca2pa_by_idx(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps,
+ struct ras_err_data *err_data)
+{
+ struct ta_ras_query_address_input addr_in;
+ uint32_t socket = 0;
+ int ret = 0;
+
+ if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
+ socket = adev->smuio.funcs->get_socket_id(adev);
+
+ /* reinit err_data */
+ err_data->err_addr_cnt = 0;
+ err_data->err_addr_len = adev->umc.retire_unit;
+
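+	/* Fill the RAS TA address query input; the convert_ras_err_addr()
+	 * callback translates the MCA error address into the physical pages
+	 * to retire, writing them into err_data->err_addr.
+	 */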
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = bps->address;
+ addr_in.ma.socket_id = socket;
+ addr_in.ma.ch_inst = bps->mem_channel;
+ /* tell RAS TA the node instance is not used */
+ addr_in.ma.node_inst = TA_RAS_INV_NODE;
+
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ ret = adev->umc.ras->convert_ras_err_addr(adev, err_data,
+ &addr_in, NULL, false);
+
+ return ret;
+}
+
+static int amdgpu_ras_mca2pa(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps,
+ struct ras_err_data *err_data)
+{
+ struct ta_ras_query_address_input addr_in;
+ uint32_t die_id, socket = 0;
+
+ if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
+ socket = adev->smuio.funcs->get_socket_id(adev);
+
+	/* although the die id is derived from the PA in nps1 mode, the id
+	 * is valid for any nps mode
+	 */
+ if (adev->umc.ras && adev->umc.ras->get_die_id_from_pa)
+ die_id = adev->umc.ras->get_die_id_from_pa(adev, bps->address,
+ bps->retired_page << AMDGPU_GPU_PAGE_SHIFT);
+ else
+ return -EINVAL;
+
+ /* reinit err_data */
+ err_data->err_addr_cnt = 0;
+ err_data->err_addr_len = adev->umc.retire_unit;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = bps->address;
+ addr_in.ma.ch_inst = bps->mem_channel;
+ addr_in.ma.umc_inst = bps->mcumc_id;
+ addr_in.ma.node_inst = die_id;
+ addr_in.ma.socket_id = socket;
+
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ return adev->umc.ras->convert_ras_err_addr(adev, err_data,
+ &addr_in, NULL, false);
+ else
+ return -EINVAL;
+}
+
+static int __amdgpu_ras_restore_bad_pages(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, int count)
+{
+ int j;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data = con->eh_data;
+
+ for (j = 0; j < count; j++) {
+ if (amdgpu_ras_check_bad_page_unlock(con,
+ bps[j].retired_page << AMDGPU_GPU_PAGE_SHIFT)) {
+ data->count++;
+ data->space_left--;
+ continue;
+ }
+
+ if (!data->space_left &&
+ amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
+ return -ENOMEM;
+ }
+
+ amdgpu_ras_reserve_page(adev, bps[j].retired_page);
+
+ memcpy(&data->bps[data->count], &(bps[j]),
+ sizeof(struct eeprom_table_record));
+ data->count++;
+ data->space_left--;
+ con->bad_page_num++;
+ }
+
+ return 0;
+}
+
+static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, struct ras_err_data *err_data,
+ enum amdgpu_memory_partition nps)
+{
+ int i = 0;
+ enum amdgpu_memory_partition save_nps;
+
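+	/* the NPS mode in effect when the record was saved is packed into the
+	 * high bits of retired_page; extract it, then strip it before use
+	 */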
+ save_nps = (bps[0].retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
+
+	/* old asics only have the pa in eeprom */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
+ memcpy(err_data->err_addr, bps,
+ sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
+ goto out;
+ }
+
+ for (i = 0; i < adev->umc.retire_unit; i++)
+ bps[i].retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+
+ if (save_nps) {
+ if (save_nps == nps) {
+ if (amdgpu_umc_pages_in_a_row(adev, err_data,
+ bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ return -EINVAL;
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ err_data->err_addr[i].address = bps[0].address;
+ err_data->err_addr[i].mem_channel = bps[0].mem_channel;
+ err_data->err_addr[i].bank = bps[0].bank;
+ err_data->err_addr[i].err_type = bps[0].err_type;
+ err_data->err_addr[i].mcumc_id = bps[0].mcumc_id;
+ }
+ } else {
+ if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data))
+ return -EINVAL;
+ }
+ } else {
+ if (bps[0].address == 0) {
+			/* for some old eeprom data, the mca address is not stored,
+			 * so calculate it from the pa
+			 */
+ if (amdgpu_umc_pa2mca(adev, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT,
+ &(bps[0].address), AMDGPU_NPS1_PARTITION_MODE))
+ return -EINVAL;
+ }
+
+ if (amdgpu_ras_mca2pa(adev, &bps[0], err_data)) {
+ if (nps == AMDGPU_NPS1_PARTITION_MODE)
+ memcpy(err_data->err_addr, bps,
+ sizeof(struct eeprom_table_record) * adev->umc.retire_unit);
+ else
+ return -EOPNOTSUPP;
+ }
+ }
+
+out:
+ return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr, adev->umc.retire_unit);
+}
+
+static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, struct ras_err_data *err_data,
+ enum amdgpu_memory_partition nps)
+{
+ int i = 0;
+ enum amdgpu_memory_partition save_nps;
+
+ save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK;
+ bps->retired_page &= ~(UMC_NPS_MASK << UMC_NPS_SHIFT);
+
+ if (save_nps == nps) {
+ if (amdgpu_umc_pages_in_a_row(adev, err_data,
+ bps->retired_page << AMDGPU_GPU_PAGE_SHIFT))
+ return -EINVAL;
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ err_data->err_addr[i].address = bps->address;
+ err_data->err_addr[i].mem_channel = bps->mem_channel;
+ err_data->err_addr[i].bank = bps->bank;
+ err_data->err_addr[i].err_type = bps->err_type;
+ err_data->err_addr[i].mcumc_id = bps->mcumc_id;
+ }
+ } else {
+ if (bps->address) {
+ if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data))
+ return -EINVAL;
+ } else {
+			/* for some old eeprom data, the mca address is not stored,
+			 * so calculate it from the pa
+			 */
+ if (amdgpu_umc_pa2mca(adev, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT,
+ &(bps->address), AMDGPU_NPS1_PARTITION_MODE))
+ return -EINVAL;
+
+ if (amdgpu_ras_mca2pa(adev, bps, err_data))
+ return -EOPNOTSUPP;
+ }
+ }
+
+ return __amdgpu_ras_restore_bad_pages(adev, err_data->err_addr,
+ adev->umc.retire_unit);
+}
+
+/* it deals with vram only. */
+int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, int pages, bool from_rom)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_data err_data;
+ struct amdgpu_ras_eeprom_control *control =
+ &adev->psp.ras_context.ras->eeprom_control;
+ enum amdgpu_memory_partition nps = AMDGPU_NPS1_PARTITION_MODE;
+ int ret = 0;
+ uint32_t i = 0;
+
+ if (!con || !con->eh_data || !bps || pages <= 0)
+ return 0;
+
+ if (from_rom) {
+ err_data.err_addr =
+ kcalloc(adev->umc.retire_unit,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev, "Failed to alloc UMC error address record in mca2pa conversion!\n");
+ return -ENOMEM;
+ }
+
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ }
+
+ mutex_lock(&con->recovery_lock);
+
+ if (from_rom) {
+		/* there are no pa recs in V3, so skip pa recs processing */
+ if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ for (i = 0; i < pages; i++) {
+ if (control->ras_num_recs - i >= adev->umc.retire_unit) {
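+					/* records sharing the same mca address and channel belong
+					 * to one retire_unit group; convert the whole group at once
+					 */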
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+					/* deal with retire_unit records at a time */
+ ret = __amdgpu_ras_convert_rec_array_from_rom(adev,
+ &bps[i], &err_data, nps);
+ if (ret)
+ con->bad_page_num -= adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ break;
+ }
+ } else {
+ break;
+ }
+ }
+ }
+ for (; i < pages; i++) {
+ ret = __amdgpu_ras_convert_rec_from_rom(adev,
+ &bps[i], &err_data, nps);
+ if (ret)
+ con->bad_page_num -= adev->umc.retire_unit;
+ }
+
+ con->eh_data->count_saved = con->eh_data->count;
+ } else {
+ ret = __amdgpu_ras_restore_bad_pages(adev, bps, pages);
+ }
+
+ if (from_rom)
+ kfree(err_data.err_addr);
+ mutex_unlock(&con->recovery_lock);
+
+ return ret;
+}
+
+/*
+ * write the error record array to eeprom; the function should be
+ * protected by recovery_lock
+ * new_cnt: newly added UE count, excluding reserved bad pages, can be NULL
+ */
+int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
+ unsigned long *new_cnt)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ struct amdgpu_ras_eeprom_control *control;
+ int save_count, unit_num, i;
+
+ if (!con || !con->eh_data) {
+ if (new_cnt)
+ *new_cnt = 0;
+
+ return 0;
+ }
+
+ if (!con->eeprom_control.is_eeprom_valid) {
+ dev_warn(adev->dev,
+ "Failed to save EEPROM table data because of EEPROM data corruption!");
+ if (new_cnt)
+ *new_cnt = 0;
+
+ return 0;
+ }
+
+ mutex_lock(&con->recovery_lock);
+ control = &con->eeprom_control;
+ data = con->eh_data;
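+	/* unit_num: retire_unit groups cached in eh_data but not yet recorded
+	 * in eeprom; save_count: bad pages cached but not yet counted by the
+	 * eeprom control
+	 */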
+ unit_num = data->count / adev->umc.retire_unit - control->ras_num_recs;
+ save_count = con->bad_page_num - control->ras_num_bad_pages;
+ mutex_unlock(&con->recovery_lock);
+
+ if (new_cnt)
+ *new_cnt = unit_num;
+
+ /* only new entries are saved */
+ if (unit_num > 0) {
+		/* old asics only save the pa to eeprom, as before */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12) {
+ if (amdgpu_ras_eeprom_append(control,
+ &data->bps[data->count_saved], unit_num)) {
+ dev_err(adev->dev, "Failed to save EEPROM table data!");
+ return -EIO;
+ }
+ } else {
+ for (i = 0; i < unit_num; i++) {
+ if (amdgpu_ras_eeprom_append(control,
+ &data->bps[data->count_saved +
+ i * adev->umc.retire_unit], 1)) {
+ dev_err(adev->dev, "Failed to save EEPROM table data!");
+ return -EIO;
+ }
+ }
+ }
+
+ dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
+ data->count_saved = data->count;
+ }
+
+ return 0;
+}
+
+/*
+ * read error record array in eeprom and reserve enough space for
+ * storing new bad pages
+ */
+static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_eeprom_control *control =
+ &adev->psp.ras_context.ras->eeprom_control;
+ struct eeprom_table_record *bps;
+ int ret, i = 0;
+
+ /* no bad page record, skip eeprom access */
+ if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0)
+ return 0;
+
+ bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
+ if (!bps)
+ return -ENOMEM;
+
+ ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
+ if (ret) {
+ dev_err(adev->dev, "Failed to load EEPROM table records!");
+ } else {
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
+			/* In V3 there are no pa recs, but some records (when address == 0)
+			 * may be parsed as pa recs, so add a version check to avoid that.
+			 */
+ if (control->tbl_hdr.version < RAS_TABLE_VER_V3) {
+ for (i = 0; i < control->ras_num_recs; i++) {
+ if ((control->ras_num_recs - i) >= adev->umc.retire_unit) {
+ if ((bps[i].address == bps[i + 1].address) &&
+ (bps[i].mem_channel == bps[i + 1].mem_channel)) {
+ control->ras_num_pa_recs += adev->umc.retire_unit;
+ i += (adev->umc.retire_unit - 1);
+ } else {
+ control->ras_num_mca_recs +=
+ (control->ras_num_recs - i);
+ break;
+ }
+ } else {
+ control->ras_num_mca_recs += (control->ras_num_recs - i);
+ break;
+ }
+ }
+ } else {
+ control->ras_num_mca_recs = control->ras_num_recs;
+ }
+ }
+
+ ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs, true);
+ if (ret)
+ goto out;
+
+ ret = amdgpu_ras_eeprom_check(control);
+ if (ret)
+ goto out;
+
+ /* HW not usable */
+ if (amdgpu_ras_is_rma(adev))
+ ret = -EHWPOISON;
+ }
+
+out:
+ kfree(bps);
+ return ret;
+}
+
+static int amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
+ uint64_t addr)
+{
+ struct ras_err_handler_data *data = con->eh_data;
+ struct amdgpu_device *adev = con->adev;
+ int i;
+
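+	/* reject addresses outside VRAM or beyond the UMC injection limit,
+	 * then compare the page frame against the cached retired pages
+	 */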
+ if ((addr >= adev->gmc.mc_vram_size &&
+ adev->gmc.mc_vram_size) ||
+ (addr >= RAS_UMC_INJECT_ADDR_LIMIT))
+ return -EINVAL;
+
+ addr >>= AMDGPU_GPU_PAGE_SHIFT;
+ for (i = 0; i < data->count; i++)
+ if (addr == data->bps[i].retired_page)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * check if an address belongs to a bad page
+ *
+ * Note: this check is only for umc block
+ */
+static int amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
+ uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret = 0;
+
+ if (!con || !con->eh_data)
+ return ret;
+
+ mutex_lock(&con->recovery_lock);
+ ret = amdgpu_ras_check_bad_page_unlock(con, addr);
+ mutex_unlock(&con->recovery_lock);
+ return ret;
+}
+
+static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev,
+ uint32_t max_count)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ /*
+ * amdgpu_bad_page_threshold is used to config
+ * the threshold for the number of bad pages.
+ * -1: Threshold is set to default value
+ * Driver will issue a warning message when threshold is reached
+ * and continue runtime services.
+ * 0: Disable bad page retirement
+ * Driver will not retire bad pages
+ * which is intended for debugging purpose.
+ * -2: Threshold is determined by a formula
+ * that assumes 1 bad page per 100M of local memory.
+	 * Driver will continue runtime services when threshold is reached.
+ * 0 < threshold < max number of bad page records in EEPROM,
+ * A user-defined threshold is set
+ * Driver will halt runtime services when this custom threshold is reached.
+ */
+ if (amdgpu_bad_page_threshold == -2) {
+ u64 val = adev->gmc.mc_vram_size;
+
+ do_div(val, RAS_BAD_PAGE_COVER);
+ con->bad_page_cnt_threshold = min(lower_32_bits(val),
+ max_count);
+ } else if (amdgpu_bad_page_threshold == -1) {
+ con->bad_page_cnt_threshold = ((con->reserved_pages_in_bytes) >> 21) << 4;
+ } else {
+ con->bad_page_cnt_threshold = min_t(int, max_count,
+ amdgpu_bad_page_threshold);
+ }
+}
+
+int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset)
+{
+ int ret = 0;
+ struct ras_poison_msg poison_msg;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ memset(&poison_msg, 0, sizeof(poison_msg));
+ poison_msg.block = block;
+ poison_msg.pasid = pasid;
+ poison_msg.reset = reset;
+ poison_msg.pasid_fn = pasid_fn;
+ poison_msg.data = data;
+
+ ret = kfifo_put(&con->poison_fifo, poison_msg);
+ if (!ret) {
+ dev_err(adev->dev, "Poison message fifo is full!\n");
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int amdgpu_ras_get_poison_req(struct amdgpu_device *adev,
+ struct ras_poison_msg *poison_msg)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ return kfifo_get(&con->poison_fifo, poison_msg);
+}
+
+static void amdgpu_ras_ecc_log_init(struct ras_ecc_log_info *ecc_log)
+{
+ mutex_init(&ecc_log->lock);
+
+ INIT_RADIX_TREE(&ecc_log->de_page_tree, GFP_KERNEL);
+ ecc_log->de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
+}
+
+static void amdgpu_ras_ecc_log_fini(struct ras_ecc_log_info *ecc_log)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ struct ras_ecc_err *ecc_err;
+
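+	/* free every ECC error record still cached in the radix tree before
+	 * tearing down the lock
+	 */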
+ mutex_lock(&ecc_log->lock);
+ radix_tree_for_each_slot(slot, &ecc_log->de_page_tree, &iter, 0) {
+ ecc_err = radix_tree_deref_slot(slot);
+ kfree(ecc_err->err_pages.pfn);
+ kfree(ecc_err);
+ radix_tree_iter_delete(&ecc_log->de_page_tree, &iter, slot);
+ }
+ mutex_unlock(&ecc_log->lock);
+
+ mutex_destroy(&ecc_log->lock);
+ ecc_log->de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
+}
+
+static bool amdgpu_ras_schedule_retirement_dwork(struct amdgpu_ras *con,
+ uint32_t delayed_ms)
+{
+ int ret;
+
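+	/* re-arm the delayed retirement work only while newly detected DE
+	 * pages are still tagged in the ecc log radix tree
+	 */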
+ mutex_lock(&con->umc_ecc_log.lock);
+ ret = radix_tree_tagged(&con->umc_ecc_log.de_page_tree,
+ UMC_ECC_NEW_DETECTED_TAG);
+ mutex_unlock(&con->umc_ecc_log.lock);
+
+ if (ret)
+ schedule_delayed_work(&con->page_retirement_dwork,
+ msecs_to_jiffies(delayed_ms));
+
+ return ret ? true : false;
+}
+
+static void amdgpu_ras_do_page_retirement(struct work_struct *work)
+{
+ struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
+ page_retirement_dwork.work);
+ struct amdgpu_device *adev = con->adev;
+ struct ras_err_data err_data;
+
+ /* If gpu reset is ongoing, delay retiring the bad pages */
+ if (amdgpu_in_reset(adev) || amdgpu_ras_in_recovery(adev)) {
+ amdgpu_ras_schedule_retirement_dwork(con,
+ AMDGPU_RAS_RETIRE_PAGE_INTERVAL * 3);
+ return;
+ }
+
+ amdgpu_ras_error_data_init(&err_data);
+
+ amdgpu_umc_handle_bad_pages(adev, &err_data);
+
+ amdgpu_ras_error_data_fini(&err_data);
+
+ amdgpu_ras_schedule_retirement_dwork(con,
+ AMDGPU_RAS_RETIRE_PAGE_INTERVAL);
+}
+
+static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev,
+ uint32_t poison_creation_count)
+{
+ int ret = 0;
+ struct ras_ecc_log_info *ecc_log;
+ struct ras_query_if info;
+ u32 timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ u64 de_queried_count;
+ u64 consumption_q_count;
+ enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION;
+
+ memset(&info, 0, sizeof(info));
+ info.head.block = AMDGPU_RAS_BLOCK__UMC;
+
+ ecc_log = &ras->umc_ecc_log;
+ ecc_log->de_queried_count = 0;
+ ecc_log->consumption_q_count = 0;
+
+ do {
+ ret = amdgpu_ras_query_error_status_with_event(adev, &info, type);
+ if (ret)
+ return ret;
+
+ de_queried_count = ecc_log->de_queried_count;
+ consumption_q_count = ecc_log->consumption_q_count;
+
+ if (de_queried_count && consumption_q_count)
+ break;
+
+ msleep(100);
+ } while (--timeout);
+
+ if (de_queried_count)
+ schedule_delayed_work(&ras->page_retirement_dwork, 0);
+
+ if (amdgpu_ras_is_rma(adev) && atomic_cmpxchg(&ras->rma_in_recovery, 0, 1) == 0)
+ amdgpu_ras_reset_gpu(adev);
+
+ return 0;
+}
+
+static void amdgpu_ras_clear_poison_fifo(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_poison_msg msg;
+ int ret;
+
+ do {
+ ret = kfifo_get(&con->poison_fifo, &msg);
+ } while (ret);
+}
+
+static int amdgpu_ras_poison_consumption_handler(struct amdgpu_device *adev,
+ uint32_t msg_count, uint32_t *gpu_reset)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ uint32_t reset_flags = 0, reset = 0;
+ struct ras_poison_msg msg;
+ int ret, i;
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+
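+	/* drain all queued poison messages, notify any registered pasid
+	 * callbacks, and accumulate the requested reset flags
+	 */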
+ for (i = 0; i < msg_count; i++) {
+ ret = amdgpu_ras_get_poison_req(adev, &msg);
+ if (!ret)
+ continue;
+
+ if (msg.pasid_fn)
+ msg.pasid_fn(adev, msg.pasid, msg.data);
+
+ reset_flags |= msg.reset;
+ }
+
+	/*
+	 * Try to ensure the poison creation handler has completed first,
+	 * so that rma is set if the bad page count exceeds the threshold.
+	 */
+ flush_delayed_work(&con->page_retirement_dwork);
+
+ /* for RMA, amdgpu_ras_poison_creation_handler will trigger gpu reset */
+ if (reset_flags && !amdgpu_ras_is_rma(adev)) {
+ if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET)
+ reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ else if (reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET)
+ reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
+ else
+ reset = reset_flags;
+
+ con->gpu_reset_flags |= reset;
+ amdgpu_ras_reset_gpu(adev);
+
+ *gpu_reset = reset;
+
+ /* Wait for gpu recovery to complete */
+ flush_work(&con->recovery_work);
+ }
+
+ return 0;
+}
+
+static int amdgpu_ras_page_retirement_thread(void *param)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)param;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ uint32_t poison_creation_count, msg_count;
+ uint32_t gpu_reset;
+ int ret;
+
+ while (!kthread_should_stop()) {
+
+ wait_event_interruptible(con->page_retirement_wq,
+ kthread_should_stop() ||
+ atomic_read(&con->page_retirement_req_cnt));
+
+ if (kthread_should_stop())
+ break;
+
+ mutex_lock(&con->poison_lock);
+ gpu_reset = 0;
+
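+		/* handle poison creation requests first; keep polling until no
+		 * creation requests remain or a consumption request arrives
+		 */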
+ do {
+ poison_creation_count = atomic_read(&con->poison_creation_count);
+ ret = amdgpu_ras_poison_creation_handler(adev, poison_creation_count);
+ if (ret == -EIO)
+ break;
+
+ if (poison_creation_count) {
+ atomic_sub(poison_creation_count, &con->poison_creation_count);
+ atomic_sub(poison_creation_count, &con->page_retirement_req_cnt);
+ }
+ } while (atomic_read(&con->poison_creation_count) &&
+ !atomic_read(&con->poison_consumption_count));
+
+ if (ret != -EIO) {
+ msg_count = kfifo_len(&con->poison_fifo);
+ if (msg_count) {
+ ret = amdgpu_ras_poison_consumption_handler(adev,
+ msg_count, &gpu_reset);
+ if ((ret != -EIO) &&
+ (gpu_reset != AMDGPU_RAS_GPU_RESET_MODE1_RESET))
+ atomic_sub(msg_count, &con->page_retirement_req_cnt);
+ }
+ }
+
+ if ((ret == -EIO) || (gpu_reset == AMDGPU_RAS_GPU_RESET_MODE1_RESET)) {
+		/* a gpu mode-1 reset is ongoing, or a ras mode-1 reset just completed */
+ /* Clear poison creation request */
+ atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
+
+ /* Clear poison fifo */
+ amdgpu_ras_clear_poison_fifo(adev);
+
+ /* Clear all poison requests */
+ atomic_set(&con->page_retirement_req_cnt, 0);
+
+ if (ret == -EIO) {
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+ up_read(&adev->reset_domain->sem);
+ }
+
+ /* Wake up work to save bad pages to eeprom */
+ schedule_delayed_work(&con->page_retirement_dwork, 0);
+ } else if (gpu_reset) {
+ /* gpu just completed mode-2 reset or other reset */
+ /* Clear poison consumption messages cached in fifo */
+ msg_count = kfifo_len(&con->poison_fifo);
+ if (msg_count) {
+ amdgpu_ras_clear_poison_fifo(adev);
+ atomic_sub(msg_count, &con->page_retirement_req_cnt);
+ }
+
+ atomic_set(&con->poison_consumption_count, 0);
+
+ /* Wake up work to save bad pages to eeprom */
+ schedule_delayed_work(&con->page_retirement_dwork, 0);
+ }
+ mutex_unlock(&con->poison_lock);
+ }
+
+ return 0;
+}
+
+int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control;
+ int ret;
+
+ if (!con || amdgpu_sriov_vf(adev))
+ return 0;
+
+ control = &con->eeprom_control;
+ ret = amdgpu_ras_eeprom_init(control);
+ control->is_eeprom_valid = !ret;
+
+ if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr)
+ control->ras_num_pa_recs = control->ras_num_recs;
+
+ if (adev->umc.ras &&
+ adev->umc.ras->get_retire_flip_bits)
+ adev->umc.ras->get_retire_flip_bits(adev);
+
+ if (control->ras_num_recs && control->is_eeprom_valid) {
+ ret = amdgpu_ras_load_bad_pages(adev);
+ if (ret) {
+ control->is_eeprom_valid = false;
+ return 0;
+ }
+
+ amdgpu_dpm_send_hbm_bad_pages_num(
+ adev, control->ras_num_bad_pages);
+
+ if (con->update_channel_flag == true) {
+ amdgpu_dpm_send_hbm_bad_channel_flag(
+ adev, control->bad_channel_bitmap);
+ con->update_channel_flag = false;
+ }
+
+ /* The format action is only applied to new ASICs */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) >= 12 &&
+ control->tbl_hdr.version < RAS_TABLE_VER_V3)
+ if (!amdgpu_ras_eeprom_reset_table(control))
+ if (amdgpu_ras_save_bad_pages(adev, NULL))
+ dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n");
+ }
+
+ return 0;
+}
+
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data **data;
+ u32 max_eeprom_records_count = 0;
+ int ret;
+
+ if (!con || amdgpu_sriov_vf(adev))
+ return 0;
+
+ /* Allow access to RAS EEPROM via debugfs, when the ASIC
+ * supports RAS and debugfs is enabled, but when
+ * adev->ras_enabled is unset, i.e. when "ras_enable"
+ * module parameter is set to 0.
+ */
+ con->adev = adev;
+
+ if (!adev->ras_enabled)
+ return 0;
+
+ data = &con->eh_data;
+ *data = kzalloc(sizeof(**data), GFP_KERNEL);
+ if (!*data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mutex_init(&con->recovery_lock);
+ mutex_init(&con->poison_lock);
+ INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
+ atomic_set(&con->in_recovery, 0);
+ atomic_set(&con->rma_in_recovery, 0);
+ con->eeprom_control.bad_channel_bitmap = 0;
+
+ max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
+ amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
+
+ if (init_bp_info) {
+ ret = amdgpu_ras_init_badpage_info(adev);
+ if (ret)
+ goto free;
+ }
+
+ mutex_init(&con->page_rsv_lock);
+ INIT_KFIFO(con->poison_fifo);
+ mutex_init(&con->page_retirement_lock);
+ init_waitqueue_head(&con->page_retirement_wq);
+ atomic_set(&con->page_retirement_req_cnt, 0);
+ atomic_set(&con->poison_creation_count, 0);
+ atomic_set(&con->poison_consumption_count, 0);
+ con->page_retirement_thread =
+ kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
+ if (IS_ERR(con->page_retirement_thread)) {
+ con->page_retirement_thread = NULL;
+ dev_warn(adev->dev, "Failed to create umc_page_retirement thread!!!\n");
+ }
+
+ INIT_DELAYED_WORK(&con->page_retirement_dwork, amdgpu_ras_do_page_retirement);
+ amdgpu_ras_ecc_log_init(&con->umc_ecc_log);
+#ifdef CONFIG_X86_MCE_AMD
+ if ((adev->asic_type == CHIP_ALDEBARAN) &&
+ (adev->gmc.xgmi.connected_to_cpu))
+ amdgpu_register_bad_pages_mca_notifier(adev);
+#endif
+ return 0;
+
+free:
+ kfree((*data)->bps);
+ kfree(*data);
+ con->eh_data = NULL;
+out:
+ dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
+
+	/*
+	 * Except for the error threshold exceeding case, other failure cases
+	 * in this function do not fail amdgpu driver init.
+	 */
+ if (!amdgpu_ras_is_rma(adev))
+ ret = 0;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data = con->eh_data;
+ int max_flush_timeout = MAX_FLUSH_RETIRE_DWORK_TIMES;
+ bool ret;
+
+ /* recovery_init failed to init it, fini is useless */
+ if (!data)
+ return 0;
+
+ /* Save all cached bad pages to eeprom */
+ do {
+ flush_delayed_work(&con->page_retirement_dwork);
+ ret = amdgpu_ras_schedule_retirement_dwork(con, 0);
+ } while (ret && max_flush_timeout--);
+
+ if (con->page_retirement_thread)
+ kthread_stop(con->page_retirement_thread);
+
+ atomic_set(&con->page_retirement_req_cnt, 0);
+ atomic_set(&con->poison_creation_count, 0);
+
+ mutex_destroy(&con->page_rsv_lock);
+
+ cancel_work_sync(&con->recovery_work);
+
+ cancel_delayed_work_sync(&con->page_retirement_dwork);
+
+ amdgpu_ras_ecc_log_fini(&con->umc_ecc_log);
+
+ mutex_lock(&con->recovery_lock);
+ con->eh_data = NULL;
+ kfree(data->bps);
+ kfree(data);
+ mutex_unlock(&con->recovery_lock);
+
+ amdgpu_ras_critical_region_init(adev);
+
+ return 0;
+}
+/* recovery end */
+
+static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev)) {
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ if (adev->asic_type == CHIP_IP_DISCOVERY) {
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ case IP_VERSION(14, 0, 3):
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ return adev->asic_type == CHIP_VEGA10 ||
+ adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN ||
+ adev->asic_type == CHIP_SIENNA_CICHLID;
+}
+
+/*
+ * this is a workaround for the vega20 workstation sku:
+ * force enable gfx ras and ignore the vbios gfx ras flag,
+ * because GC EDC cannot be written
+ */
+static void amdgpu_ras_get_quirks(struct amdgpu_device *adev)
+{
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ if (!ctx)
+ return;
+
+ if (strnstr(ctx->vbios_pn, "D16406",
+ sizeof(ctx->vbios_pn)) ||
+ strnstr(ctx->vbios_pn, "D36002",
+ sizeof(ctx->vbios_pn)))
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX);
+}
+
+/* Query ras capability via the atomfirmware interface */
+static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev)
+{
+ /* mem_ecc cap */
+ if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+ dev_info(adev->dev, "MEM ECC is active.\n");
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else {
+ dev_info(adev->dev, "MEM ECC is not presented.\n");
+ }
+
+ /* sram_ecc cap */
+ if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+ dev_info(adev->dev, "SRAM ECC is active.\n");
+ if (!amdgpu_sriov_vf(adev))
+ adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ else
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
+ 1 << AMDGPU_RAS_BLOCK__SDMA |
+ 1 << AMDGPU_RAS_BLOCK__GFX);
+
+ /*
+ * VCN/JPEG RAS can be supported on both bare metal and
+ * SRIOV environment
+ */
+ if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
+ amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
+ amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3) ||
+ amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(5, 0, 1))
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
+ else
+ adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
+
+ /*
+ * XGMI RAS is not supported if xgmi num physical nodes
+ * is zero
+ */
+ if (!adev->gmc.xgmi.num_physical_nodes)
+ adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL);
+ } else {
+ dev_info(adev->dev, "SRAM ECC is not presented.\n");
+ }
+}
+
+/* Query poison mode from umc/df IP callbacks */
+static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ bool df_poison, umc_poison;
+
+ /* poison setting is useless on SRIOV guest */
+ if (amdgpu_sriov_vf(adev) || !con)
+ return;
+
+ /* Init poison supported flag, the default value is false */
+ if (adev->gmc.xgmi.connected_to_cpu ||
+ adev->gmc.is_app_apu) {
+ /* enabled by default when GPU is connected to CPU */
+ con->poison_supported = true;
+ } else if (adev->df.funcs &&
+ adev->df.funcs->query_ras_poison_mode &&
+ adev->umc.ras &&
+ adev->umc.ras->query_ras_poison_mode) {
+ df_poison =
+ adev->df.funcs->query_ras_poison_mode(adev);
+ umc_poison =
+ adev->umc.ras->query_ras_poison_mode(adev);
+
+		/* Only if poison is set in both DF and UMC can we support it */
+ if (df_poison && umc_poison)
+ con->poison_supported = true;
+ else if (df_poison != umc_poison)
+ dev_warn(adev->dev,
+ "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
+ df_poison, umc_poison);
+ }
+}
+
+/*
+ * check the hardware's ras ability, which is saved in hw_supported.
+ * if the hardware does not support ras, we can skip some ras initialization
+ * and forbid some ras operations from the IPs.
+ * if software itself, say a boot parameter, limits the ras ability, we still
+ * need to allow the IPs to do some limited operations, like disable. In that
+ * case we have to initialize ras as normal, but check in each function
+ * whether the operation is allowed.
+ */
+static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
+{
+ adev->ras_hw_enabled = adev->ras_enabled = 0;
+
+ if (!amdgpu_ras_asic_supported(adev))
+ return;
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_virt_get_ras_capability(adev))
+ goto init_ras_enabled_flag;
+ }
+
+ /* query ras capability from psp */
+ if (amdgpu_psp_get_ras_capability(&adev->psp))
+ goto init_ras_enabled_flag;
+
+	/* query ras capability from vbios */
+ if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
+ amdgpu_ras_query_ras_capablity_from_vbios(adev);
+ } else {
+		/* the driver only manages the RAS feature of a few IP blocks
+		 * when the GPU is connected to the CPU through XGMI */
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX |
+ 1 << AMDGPU_RAS_BLOCK__SDMA |
+ 1 << AMDGPU_RAS_BLOCK__MMHUB);
+ }
+
+ /* apply asic specific settings (vega20 only for now) */
+ amdgpu_ras_get_quirks(adev);
+
+ /* query poison mode from umc/df ip callback */
+ amdgpu_ras_query_poison_mode(adev);
+
+init_ras_enabled_flag:
+ /* hw_supported needs to be aligned with RAS block mask. */
+ adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK;
+
+ adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 :
+ adev->ras_hw_enabled & amdgpu_ras_mask;
+
+ /* aca is disabled by default except for psp v13_0_6/v13_0_12/v13_0_14 */
+ if (!amdgpu_sriov_vf(adev)) {
+ adev->aca.is_enabled =
+ (amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 6) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 12) ||
+ amdgpu_ip_version(adev, MP0_HWIP, 0) == IP_VERSION(13, 0, 14));
+ }
+
+	/* the bad page feature is not applicable to this specific app APU platform */
+ if (adev->gmc.is_app_apu &&
+ amdgpu_ip_version(adev, UMC_HWIP, 0) == IP_VERSION(12, 0, 0))
+ amdgpu_bad_page_threshold = 0;
+}
+
+static void amdgpu_ras_counte_dw(struct work_struct *work)
+{
+ struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
+ ras_counte_delay_work.work);
+ struct amdgpu_device *adev = con->adev;
+ struct drm_device *dev = adev_to_drm(adev);
+ unsigned long ce_count, ue_count;
+ int res;
+
+ res = pm_runtime_get_sync(dev->dev);
+ if (res < 0)
+ goto Out;
+
+ /* Cache new values.
+ */
+ if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
+ atomic_set(&con->ras_ce_count, ce_count);
+ atomic_set(&con->ras_ue_count, ue_count);
+ }
+
+ pm_runtime_mark_last_busy(dev->dev);
+Out:
+ pm_runtime_put_autosuspend(dev->dev);
+}
+
+static int amdgpu_get_ras_schema(struct amdgpu_device *adev)
+{
+	return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
+			AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
+			AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
+			AMDGPU_RAS_ERROR__PARITY;
+}
+
+static void ras_event_mgr_init(struct ras_event_manager *mgr)
+{
+ struct ras_event_state *event_state;
+ int i;
+
+ memset(mgr, 0, sizeof(*mgr));
+ atomic64_set(&mgr->seqno, 0);
+
+ for (i = 0; i < ARRAY_SIZE(mgr->event_state); i++) {
+ event_state = &mgr->event_state[i];
+ event_state->last_seqno = RAS_EVENT_INVALID_ID;
+ atomic64_set(&event_state->count, 0);
+ }
+}
+
+static void amdgpu_ras_event_mgr_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ struct amdgpu_hive_info *hive;
+
+ if (!ras)
+ return;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ ras->event_mgr = hive ? &hive->event_mgr : &ras->__event_mgr;
+
+ /* init event manager with node 0 on xgmi system */
+ if (!amdgpu_reset_in_recovery(adev)) {
+ if (!hive || adev->gmc.xgmi.node_id == 0)
+ ras_event_mgr_init(ras->event_mgr);
+ }
+
+ if (hive)
+ amdgpu_put_xgmi_hive(hive);
+}
+
+static void amdgpu_ras_init_reserved_vram_size(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con || (adev->flags & AMD_IS_APU))
+ return;
+
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
+ con->reserved_pages_in_bytes = AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT;
+ break;
+ case IP_VERSION(13, 0, 14):
+ con->reserved_pages_in_bytes = (AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT << 1);
+ break;
+ default:
+ break;
+ }
+}
+
+int amdgpu_ras_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int r;
+
+ if (con)
+ return 0;
+
+ con = kzalloc(sizeof(*con) +
+ sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
+ sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
+ GFP_KERNEL);
+ if (!con)
+ return -ENOMEM;
+
+ con->adev = adev;
+ INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
+ atomic_set(&con->ras_ce_count, 0);
+ atomic_set(&con->ras_ue_count, 0);
+
+ con->objs = (struct ras_manager *)(con + 1);
+
+ amdgpu_ras_set_context(adev, con);
+
+ amdgpu_ras_check_supported(adev);
+
+ if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) {
+		/* set the gfx block ras context feature for VEGA20 Gaming,
+		 * to send the ras disable cmd to the ras ta during ras late init.
+		 */
+ if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) {
+ con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
+
+ return 0;
+ }
+
+ r = 0;
+ goto release_con;
+ }
+
+ con->update_channel_flag = false;
+ con->features = 0;
+ con->schema = 0;
+ INIT_LIST_HEAD(&con->head);
+	/* Might need to get this flag from vbios. */
+ con->flags = RAS_DEFAULT_FLAGS;
+
+ /* initialize nbio ras function ahead of any other
+ * ras functions so hardware fatal error interrupt
+ * can be enabled as early as possible */
+ switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
+ case IP_VERSION(7, 4, 0):
+ case IP_VERSION(7, 4, 1):
+ case IP_VERSION(7, 4, 4):
+ if (!adev->gmc.xgmi.connected_to_cpu)
+ adev->nbio.ras = &nbio_v7_4_ras;
+ break;
+ case IP_VERSION(4, 3, 0):
+ if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
+			/* unlike other generations of nbio ras,
+			 * nbio v4_3 only supports the fatal error interrupt
+			 * to inform software that DF is frozen due to a
+			 * system fatal error event. The driver should not
+			 * enable nbio ras in such a case. Instead,
+			 * check DF RAS */
+ adev->nbio.ras = &nbio_v4_3_ras;
+ break;
+ case IP_VERSION(6, 3, 1):
+ if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
+ /* unlike other generations of nbio ras,
+ * nbif v6_3_1 only supports a fatal error interrupt
+ * to inform software that DF is frozen due to a
+ * system fatal error event. The driver should not
+ * enable nbio ras in such a case. Instead,
+ * check DF RAS
+ */
+ adev->nbio.ras = &nbif_v6_3_1_ras;
+ break;
+ case IP_VERSION(7, 9, 0):
+ case IP_VERSION(7, 9, 1):
+ if (!adev->gmc.is_app_apu)
+ adev->nbio.ras = &nbio_v7_9_ras;
+ break;
+ default:
+ /* nbio ras is not available */
+ break;
+ }
+
+ /* nbio ras block needs to be enabled ahead of other ras blocks
+ * to handle fatal error */
+ r = amdgpu_nbio_ras_sw_init(adev);
+ if (r)
+ return r;
+
+ if (adev->nbio.ras &&
+ adev->nbio.ras->init_ras_controller_interrupt) {
+ r = adev->nbio.ras->init_ras_controller_interrupt(adev);
+ if (r)
+ goto release_con;
+ }
+
+ if (adev->nbio.ras &&
+ adev->nbio.ras->init_ras_err_event_athub_interrupt) {
+ r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev);
+ if (r)
+ goto release_con;
+ }
+
+ /* Pack socket_id into ras feature mask bits[31:29] */
+ if (adev->smuio.funcs &&
+ adev->smuio.funcs->get_socket_id)
+ con->features |= ((adev->smuio.funcs->get_socket_id(adev)) <<
+ AMDGPU_RAS_FEATURES_SOCKETID_SHIFT);
+
+ /* Get RAS schema for particular SOC */
+ con->schema = amdgpu_get_ras_schema(adev);
+
+ amdgpu_ras_init_reserved_vram_size(adev);
+
+ if (amdgpu_ras_fs_init(adev)) {
+ r = -EINVAL;
+ goto release_con;
+ }
+
+ if (amdgpu_ras_aca_is_supported(adev)) {
+ if (amdgpu_aca_is_enabled(adev))
+ r = amdgpu_aca_init(adev);
+ else
+ r = amdgpu_mca_init(adev);
+ if (r)
+ goto release_con;
+ }
+
+ con->init_task_pid = task_pid_nr(current);
+ get_task_comm(con->init_task_comm, current);
+
+ mutex_init(&con->critical_region_lock);
+ INIT_LIST_HEAD(&con->critical_region_head);
+
+ dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
+ "hardware ability[%x] ras_mask[%x]\n",
+ adev->ras_hw_enabled, adev->ras_enabled);
+
+ return 0;
+release_con:
+ amdgpu_ras_set_context(adev, NULL);
+ kfree(con);
+
+ return r;
+}
+
+int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev)
+{
+ if (adev->gmc.xgmi.connected_to_cpu ||
+ adev->gmc.is_app_apu)
+ return 1;
+ return 0;
+}
+
+static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block)
+{
+ struct ras_query_if info = {
+ .head = *ras_block,
+ };
+
+ if (!amdgpu_persistent_edc_harvesting_supported(adev))
+ return 0;
+
+ if (amdgpu_ras_query_error_status(adev, &info) != 0)
+ DRM_WARN("RAS init harvest failure");
+
+ if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
+ DRM_WARN("RAS init harvest reset failure");
+
+ return 0;
+}
+
+bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con)
+ return false;
+
+ return con->poison_supported;
+}
+
+/* helper function to handle common stuff in ip late init phase */
+int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block)
+{
+ struct amdgpu_ras_block_object *ras_obj = NULL;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_query_if *query_info;
+ unsigned long ue_count, ce_count;
+ int r;
+
+ /* disable RAS feature per IP block if it is not supported */
+ if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
+ amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
+ return 0;
+ }
+
+ r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
+ if (r) {
+ if (adev->in_suspend || amdgpu_reset_in_recovery(adev)) {
+ /* in the resume phase, if we fail to enable ras,
+ * clean up all ras fs nodes and disable ras */
+ goto cleanup;
+ } else
+ return r;
+ }
+
+ /* check for errors on warm reset for ASICs that support persistent EDC harvesting */
+ amdgpu_persistent_edc_harvesting(adev, ras_block);
+
+ /* in resume phase, no need to create ras fs node */
+ if (adev->in_suspend || amdgpu_reset_in_recovery(adev))
+ return 0;
+
+ ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
+ if (ras_obj->ras_cb || (ras_obj->hw_ops &&
+ (ras_obj->hw_ops->query_poison_status ||
+ ras_obj->hw_ops->handle_poison_consumption))) {
+ r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
+ if (r)
+ goto cleanup;
+ }
+
+ if (ras_obj->hw_ops &&
+ (ras_obj->hw_ops->query_ras_error_count ||
+ ras_obj->hw_ops->query_ras_error_status)) {
+ r = amdgpu_ras_sysfs_create(adev, ras_block);
+ if (r)
+ goto interrupt;
+
+ /* Those are the cached values at init.
+ */
+ query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
+ if (!query_info)
+ return -ENOMEM;
+ memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
+
+ if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
+ atomic_set(&con->ras_ce_count, ce_count);
+ atomic_set(&con->ras_ue_count, ue_count);
+ }
+
+ kfree(query_info);
+ }
+
+ return 0;
+
+interrupt:
+ if (ras_obj->ras_cb)
+ amdgpu_ras_interrupt_remove_handler(adev, ras_block);
+cleanup:
+ amdgpu_ras_feature_enable(adev, ras_block, 0);
+ return r;
+}
+
+static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block)
+{
+ return amdgpu_ras_block_late_init(adev, ras_block);
+}
+
+/* helper function to remove ras fs node and interrupt handler */
+void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block)
+{
+ struct amdgpu_ras_block_object *ras_obj;
+ if (!ras_block)
+ return;
+
+ amdgpu_ras_sysfs_remove(adev, ras_block);
+
+ ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
+ if (ras_obj->ras_cb)
+ amdgpu_ras_interrupt_remove_handler(adev, ras_block);
+}
+
+static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block)
+{
+ return amdgpu_ras_block_late_fini(adev, ras_block);
+}
+
+/* Do some init work after IP late init, as a dependency.
+ * It runs in the resume / gpu reset / boot-up cases.
+ */
+void amdgpu_ras_resume(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ if (!adev->ras_enabled || !con) {
+ /* clean ras context for VEGA20 Gaming after sending the ras disable cmd */
+ amdgpu_release_ras_context(adev);
+
+ return;
+ }
+
+ if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
+ /* Set up all other IPs which are not implemented. Strictly
+ * speaking, each IP's actual ras error type should be
+ * MULTI_UNCORRECTABLE, but since the driver does not handle
+ * it, ERROR_NONE makes sense anyway.
+ */
+ amdgpu_ras_enable_all_features(adev, 1);
+
+ /* We enable ras on all hw_supported blocks, but the boot
+ * parameter might disable some of them, and one or more IPs
+ * may not be implemented yet. So we disable those on their behalf.
+ */
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
+ amdgpu_ras_feature_enable(adev, &obj->head, 0);
+ /* there should not be any reference left. */
+ WARN_ON(alive_obj(obj));
+ }
+ }
+ }
+}
+
+void amdgpu_ras_suspend(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!adev->ras_enabled || !con)
+ return;
+
+ amdgpu_ras_disable_all_features(adev, 0);
+ /* Make sure all ras objects are disabled. */
+ if (AMDGPU_RAS_GET_FEATURES(con->features))
+ amdgpu_ras_disable_all_features(adev, 1);
+}
+
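+/* RAS late init: set up the event manager, reset ACA/MCA banks when coming
+ * back from reset recovery, turn off ACA/MCA debug mode on bare metal, then
+ * run every registered block's ras_late_init (or the default helper) for
+ * the blocks the hardware actually supports.
+ */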
+int amdgpu_ras_late_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_block_list *node, *tmp;
+ struct amdgpu_ras_block_object *obj;
+ int r;
+
+ amdgpu_ras_event_mgr_init(adev);
+
+ if (amdgpu_ras_aca_is_supported(adev)) {
+ if (amdgpu_reset_in_recovery(adev)) {
+ if (amdgpu_aca_is_enabled(adev))
+ r = amdgpu_aca_reset(adev);
+ else
+ r = amdgpu_mca_reset(adev);
+ if (r)
+ return r;
+ }
+
+ if (!amdgpu_sriov_vf(adev)) {
+ if (amdgpu_aca_is_enabled(adev))
+ amdgpu_ras_set_aca_debug_mode(adev, false);
+ else
+ amdgpu_ras_set_mca_debug_mode(adev, false);
+ }
+ }
+
+ /* The guest side doesn't need to init the ras feature */
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_ras_telemetry_en(adev))
+ return 0;
+
+ list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
+ obj = node->ras_obj;
+ if (!obj) {
+ dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
+ continue;
+ }
+
+ if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
+ continue;
+
+ if (obj->ras_late_init) {
+ r = obj->ras_late_init(adev, &obj->ras_comm);
+ if (r) {
+ dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
+ obj->ras_comm.name, r);
+ return r;
+ }
+ } else
+ amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
+ }
+
+ return 0;
+}
+
+/* Do some fini work before IP fini, as a dependency */
+int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!adev->ras_enabled || !con)
+ return 0;
+
+
+ /* Need to disable ras on all IPs here before ip [hw/sw]fini */
+ if (AMDGPU_RAS_GET_FEATURES(con->features))
+ amdgpu_ras_disable_all_features(adev, 0);
+ amdgpu_ras_recovery_fini(adev);
+ return 0;
+}
+
+int amdgpu_ras_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras_block_list *ras_node, *tmp;
+ struct amdgpu_ras_block_object *obj = NULL;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!adev->ras_enabled || !con)
+ return 0;
+
+ amdgpu_ras_critical_region_fini(adev);
+ mutex_destroy(&con->critical_region_lock);
+
+ list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) {
+ if (ras_node->ras_obj) {
+ obj = ras_node->ras_obj;
+ if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
+ obj->ras_fini)
+ obj->ras_fini(adev, &obj->ras_comm);
+ else
+ amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
+ }
+
+ /* Clear ras blocks from ras_list and free ras block list node */
+ list_del(&ras_node->node);
+ kfree(ras_node);
+ }
+
+ amdgpu_ras_fs_fini(adev);
+ amdgpu_ras_interrupt_remove_all(adev);
+
+ if (amdgpu_ras_aca_is_supported(adev)) {
+ if (amdgpu_aca_is_enabled(adev))
+ amdgpu_aca_fini(adev);
+ else
+ amdgpu_mca_fini(adev);
+ }
+
+ WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");
+
+ if (AMDGPU_RAS_GET_FEATURES(con->features))
+ amdgpu_ras_disable_all_features(adev, 0);
+
+ cancel_delayed_work_sync(&con->ras_counte_delay_work);
+
+ amdgpu_ras_set_context(adev, NULL);
+ kfree(con);
+
+ return 0;
+}
+
+bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (!ras)
+ return false;
+
+ return test_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
+}
+
+void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (ras) {
+ if (status)
+ set_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
+ else
+ clear_bit(AMDGPU_RAS_BLOCK__LAST, &ras->ras_err_state);
+ }
+}
+
+void amdgpu_ras_clear_err_state(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (ras) {
+ ras->ras_err_state = 0;
+ ras->gpu_reset_flags = 0;
+ }
+}
+
+void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (ras)
+ set_bit(block, &ras->ras_err_state);
+}
+
+bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (ras) {
+ if (block == AMDGPU_RAS_BLOCK__ANY)
+ return (ras->ras_err_state != 0);
+ else
+ return test_bit(block, &ras->ras_err_state) ||
+ test_bit(AMDGPU_RAS_BLOCK__LAST,
+ &ras->ras_err_state);
+ }
+
+ return false;
+}
+
+static struct ras_event_manager *__get_ras_event_mgr(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras;
+
+ ras = amdgpu_ras_get_context(adev);
+ if (!ras)
+ return NULL;
+
+ return ras->event_mgr;
+}
+
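+/* Record a new RAS event of the given type: a fresh id is taken from the
+ * manager-wide sequence counter and remembered as the type's last_seqno,
+ * and the per-type event count is bumped. Callers normally go through the
+ * amdgpu_ras_mark_ras_event() wrapper, which passes the return address so
+ * failures can be attributed to the call site.
+ */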
+int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
+ const void *caller)
+{
+ struct ras_event_manager *event_mgr;
+ struct ras_event_state *event_state;
+ int ret = 0;
+
+ if (type >= RAS_EVENT_TYPE_COUNT) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ event_mgr = __get_ras_event_mgr(adev);
+ if (!event_mgr) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ event_state = &event_mgr->event_state[type];
+ event_state->last_seqno = atomic64_inc_return(&event_mgr->seqno);
+ atomic64_inc(&event_state->count);
+
+out:
+ if (ret && caller)
+ dev_warn(adev->dev, "failed mark ras event (%d) in %ps, ret:%d\n",
+ (int)type, caller, ret);
+
+ return ret;
+}
+
+u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type)
+{
+ struct ras_event_manager *event_mgr;
+ u64 id;
+
+ if (type >= RAS_EVENT_TYPE_COUNT)
+ return RAS_EVENT_INVALID_ID;
+
+ switch (type) {
+ case RAS_EVENT_TYPE_FATAL:
+ case RAS_EVENT_TYPE_POISON_CREATION:
+ case RAS_EVENT_TYPE_POISON_CONSUMPTION:
+ event_mgr = __get_ras_event_mgr(adev);
+ if (!event_mgr)
+ return RAS_EVENT_INVALID_ID;
+
+ id = event_mgr->event_state[type].last_seqno;
+ break;
+ case RAS_EVENT_TYPE_INVALID:
+ default:
+ id = RAS_EVENT_INVALID_ID;
+ break;
+ }
+
+ return id;
+}
+
+void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
+{
+ if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ enum ras_event_type type = RAS_EVENT_TYPE_FATAL;
+ u64 event_id;
+
+ if (amdgpu_ras_mark_ras_event(adev, type)) {
+ dev_err(adev->dev,
+ "uncorrectable hardware error (ERREVENT_ATHUB_INTERRUPT) detected!\n");
+ return;
+ }
+
+ event_id = amdgpu_ras_acquire_event_id(adev, type);
+
+ RAS_EVENT_LOG(adev, event_id, "uncorrectable hardware error"
+ "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
+
+ amdgpu_ras_set_fed(adev, true);
+ ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ amdgpu_ras_reset_gpu(adev);
+ }
+}
+
+bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
+{
+ if (adev->asic_type == CHIP_VEGA20 &&
+ adev->pm.fw_version <= 0x283400) {
+ return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) &&
+ amdgpu_ras_intr_triggered();
+ }
+
+ return false;
+}
+
+void amdgpu_release_ras_context(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con)
+ return;
+
+ if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
+ con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
+ amdgpu_ras_set_context(adev, NULL);
+ kfree(con);
+ }
+}
+
+#ifdef CONFIG_X86_MCE_AMD
+static struct amdgpu_device *find_adev(uint32_t node_id)
+{
+ int i;
+ struct amdgpu_device *adev = NULL;
+
+ for (i = 0; i < mce_adev_list.num_gpu; i++) {
+ adev = mce_adev_list.devs[i];
+
+ if (adev && adev->gmc.xgmi.connected_to_cpu &&
+ adev->gmc.xgmi.physical_node_id == node_id)
+ break;
+ adev = NULL;
+ }
+
+ return adev;
+}
+
+#define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF)
+#define GET_UMC_INST(m) (((m) >> 21) & 0x7)
+#define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
+#define GPU_ID_OFFSET 8
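+/* The macros above pull fields out of the MCA_IPID register value reported
+ * with the MCE: the GPU id from bits 47:44, the UMC instance from bits
+ * 23:21, and a 3-bit channel index assembled from bits 13:12 and bit 20.
+ */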
+
+static int amdgpu_bad_page_notifier(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct mce *m = (struct mce *)data;
+ struct amdgpu_device *adev = NULL;
+ uint32_t gpu_id = 0;
+ uint32_t umc_inst = 0, ch_inst = 0;
+
+ /*
+ * If the error was generated in UMC_V2, which belongs to GPU UMCs,
+ * and error occurred in DramECC (Extended error code = 0) then only
+ * process the error, else bail out.
+ */
+ if (!m || !((smca_get_bank_type(m->extcpu, m->bank) == SMCA_UMC_V2) &&
+ (XEC(m->status, 0x3f) == 0x0)))
+ return NOTIFY_DONE;
+
+ /*
+ * If it is a correctable error, return.
+ */
+ if (mce_is_correctable(m))
+ return NOTIFY_OK;
+
+ /*
+ * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
+ */
+ gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
+
+ adev = find_adev(gpu_id);
+ if (!adev) {
+ DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
+ gpu_id);
+ return NOTIFY_DONE;
+ }
+
+ /*
+ * If it is an uncorrectable error, find out the UMC instance and
+ * channel index.
+ */
+ umc_inst = GET_UMC_INST(m->ipid);
+ ch_inst = GET_CHAN_INDEX(m->ipid);
+
+ dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
+ umc_inst, ch_inst);
+
+ if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
+ return NOTIFY_OK;
+ else
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block amdgpu_bad_page_nb = {
+ .notifier_call = amdgpu_bad_page_notifier,
+ .priority = MCE_PRIO_UC,
+};
+
+static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
+{
+ /*
+ * Add the adev to the mce_adev_list.
+ * During mode2 reset, amdgpu device is temporarily
+ * removed from the mgpu_info list which can cause
+ * page retirement to fail.
+ * Use this list instead of mgpu_info to find the amdgpu
+ * device on which the UMC error was reported.
+ */
+ mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
+
+ /*
+ * Register the x86 notifier only once
+ * with MCE subsystem.
+ */
+ if (notifier_registered == false) {
+ mce_register_decode_chain(&amdgpu_bad_page_nb);
+ notifier_registered = true;
+ }
+}
+#endif
+
+struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev)
+{
+ if (!adev)
+ return NULL;
+
+ return adev->psp.ras_context.ras;
+}
+
+int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con)
+{
+ if (!adev)
+ return -EINVAL;
+
+ adev->psp.ras_context.ras = ras_con;
+ return 0;
+}
+
+/* check if ras is supported on block, say, sdma, gfx */
+int amdgpu_ras_is_supported(struct amdgpu_device *adev,
+ unsigned int block)
+{
+ int ret = 0;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (block >= AMDGPU_RAS_BLOCK_COUNT)
+ return 0;
+
+ ret = ras && (adev->ras_enabled & (1 << block));
+
+ /* For the special asics with mem ecc enabled but sram ecc
+ * not enabled, even if the ras block is not supported in
+ * .ras_enabled, the block can be considered to support the
+ * ras function as long as the asic supports poison mode and
+ * the ras block has a ras configuration.
+ */
+ if (!ret &&
+ (block == AMDGPU_RAS_BLOCK__GFX ||
+ block == AMDGPU_RAS_BLOCK__SDMA ||
+ block == AMDGPU_RAS_BLOCK__VCN ||
+ block == AMDGPU_RAS_BLOCK__JPEG) &&
+ (amdgpu_ras_mask & (1 << block)) &&
+ amdgpu_ras_is_poison_mode_supported(adev) &&
+ amdgpu_ras_get_ras_block(adev, block, 0))
+ ret = 1;
+
+ return ret;
+}
+
+int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ /* mode1 is the only selection for RMA status */
+ if (amdgpu_ras_is_rma(adev)) {
+ ras->gpu_reset_flags = 0;
+ ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET;
+ }
+
+ if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) {
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+ int hive_ras_recovery = 0;
+
+ if (hive) {
+ hive_ras_recovery = atomic_read(&hive->ras_recovery);
+ amdgpu_put_xgmi_hive(hive);
+ }
+ /* In the case of multiple GPUs, after a GPU has started
+ * resetting all GPUs on hive, other GPUs do not need to
+ * trigger GPU reset again.
+ */
+ if (!hive_ras_recovery)
+ amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
+ else
+ atomic_set(&ras->in_recovery, 0);
+ } else {
+ flush_work(&ras->recovery_work);
+ amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
+ }
+
+ return 0;
+}
+
+int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret = 0;
+
+ if (con) {
+ ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
+ if (!ret)
+ con->is_aca_debug_mode = enable;
+ }
+
+ return ret;
+}
+
+int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret = 0;
+
+ if (con) {
+ if (amdgpu_aca_is_enabled(adev))
+ ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
+ else
+ ret = amdgpu_mca_smu_set_debug_mode(adev, enable);
+ if (!ret)
+ con->is_aca_debug_mode = enable;
+ }
+
+ return ret;
+}
+
+bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
+ const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+
+ if (!con)
+ return false;
+
+ if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) ||
+ (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode))
+ return con->is_aca_debug_mode;
+ else
+ return true;
+}
+
+bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
+ unsigned int *error_query_mode)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+ const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs;
+
+ if (!con) {
+ *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
+ return false;
+ }
+
+ if (amdgpu_sriov_vf(adev)) {
+ *error_query_mode = AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY;
+ } else if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) {
+ *error_query_mode =
+ (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
+ } else {
+ *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
+ }
+
+ return true;
+}
+
+/* Register each ip ras block into amdgpu ras */
+int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
+ struct amdgpu_ras_block_object *ras_block_obj)
+{
+ struct amdgpu_ras_block_list *ras_node;
+ if (!adev || !ras_block_obj)
+ return -EINVAL;
+
+ ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
+ if (!ras_node)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ras_node->node);
+ ras_node->ras_obj = ras_block_obj;
+ list_add_tail(&ras_node->node, &adev->ras_list);
+
+ return 0;
+}
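+
+/* Illustrative only: an IP block typically calls the function above from
+ * its sw_init with the amdgpu_ras_block_object it embeds, e.g.
+ *   amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ * The list node allocated here is torn down again in amdgpu_ras_fini().
+ */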
+
+void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name)
+{
+ if (!err_type_name)
+ return;
+
+ switch (err_type) {
+ case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
+ sprintf(err_type_name, "correctable");
+ break;
+ case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
+ sprintf(err_type_name, "uncorrectable");
+ break;
+ default:
+ sprintf(err_type_name, "unknown");
+ break;
+ }
+}
+
+bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_entry,
+ uint32_t instance,
+ uint32_t *memory_id)
+{
+ uint32_t err_status_lo_data, err_status_lo_offset;
+
+ if (!reg_entry)
+ return false;
+
+ err_status_lo_offset =
+ AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
+ reg_entry->seg_lo, reg_entry->reg_lo);
+ err_status_lo_data = RREG32(err_status_lo_offset);
+
+ if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) &&
+ !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG))
+ return false;
+
+ *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID);
+
+ return true;
+}
+
+bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_entry,
+ uint32_t instance,
+ unsigned long *err_cnt)
+{
+ uint32_t err_status_hi_data, err_status_hi_offset;
+
+ if (!reg_entry)
+ return false;
+
+ err_status_hi_offset =
+ AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance,
+ reg_entry->seg_hi, reg_entry->reg_hi);
+ err_status_hi_data = RREG32(err_status_hi_offset);
+
+ if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) &&
+ !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG))
+ /* keep the check here in case we need to refer to the result later */
+ dev_dbg(adev->dev, "Invalid err_info field\n");
+
+ /* read err count */
+ *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT);
+
+ return true;
+}
+
+void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_list,
+ uint32_t reg_list_size,
+ const struct amdgpu_ras_memory_id_entry *mem_list,
+ uint32_t mem_list_size,
+ uint32_t instance,
+ uint32_t err_type,
+ unsigned long *err_count)
+{
+ uint32_t memory_id;
+ unsigned long err_cnt;
+ char err_type_name[16];
+ uint32_t i, j;
+
+ for (i = 0; i < reg_list_size; i++) {
+ /* query memory_id from err_status_lo */
+ if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
+ instance, &memory_id))
+ continue;
+
+ /* query err_cnt from err_status_hi */
+ if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
+ instance, &err_cnt) ||
+ !err_cnt)
+ continue;
+
+ *err_count += err_cnt;
+
+ /* log the errors */
+ amdgpu_ras_get_error_type_name(err_type, err_type_name);
+ if (!mem_list) {
+ /* memory_list is not supported */
+ dev_info(adev->dev,
+ "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n",
+ err_cnt, err_type_name,
+ reg_list[i].block_name,
+ instance, memory_id);
+ } else {
+ for (j = 0; j < mem_list_size; j++) {
+ if (memory_id == mem_list[j].memory_id) {
+ dev_info(adev->dev,
+ "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n",
+ err_cnt, err_type_name,
+ reg_list[i].block_name,
+ instance, mem_list[j].name);
+ break;
+ }
+ }
+ }
+ }
+}
+
+void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_list,
+ uint32_t reg_list_size,
+ uint32_t instance)
+{
+ uint32_t err_status_lo_offset, err_status_hi_offset;
+ uint32_t i;
+
+ for (i = 0; i < reg_list_size; i++) {
+ err_status_lo_offset =
+ AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
+ reg_list[i].seg_lo, reg_list[i].reg_lo);
+ err_status_hi_offset =
+ AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance,
+ reg_list[i].seg_hi, reg_list[i].reg_hi);
+ WREG32(err_status_lo_offset, 0);
+ WREG32(err_status_hi_offset, 0);
+ }
+}
+
+int amdgpu_ras_error_data_init(struct ras_err_data *err_data)
+{
+ memset(err_data, 0, sizeof(*err_data));
+
+ INIT_LIST_HEAD(&err_data->err_node_list);
+
+ return 0;
+}
+
+static void amdgpu_ras_error_node_release(struct ras_err_node *err_node)
+{
+ if (!err_node)
+ return;
+
+ list_del(&err_node->node);
+ kvfree(err_node);
+}
+
+void amdgpu_ras_error_data_fini(struct ras_err_data *err_data)
+{
+ struct ras_err_node *err_node, *tmp;
+
+ list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node)
+ amdgpu_ras_error_node_release(err_node);
+}
+
+static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data,
+ struct amdgpu_smuio_mcm_config_info *mcm_info)
+{
+ struct ras_err_node *err_node;
+ struct amdgpu_smuio_mcm_config_info *ref_id;
+
+ if (!err_data || !mcm_info)
+ return NULL;
+
+ for_each_ras_error(err_node, err_data) {
+ ref_id = &err_node->err_info.mcm_info;
+
+ if (mcm_info->socket_id == ref_id->socket_id &&
+ mcm_info->die_id == ref_id->die_id)
+ return err_node;
+ }
+
+ return NULL;
+}
+
+static struct ras_err_node *amdgpu_ras_error_node_new(void)
+{
+ struct ras_err_node *err_node;
+
+ err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
+ if (!err_node)
+ return NULL;
+
+ INIT_LIST_HEAD(&err_node->node);
+
+ return err_node;
+}
+
+static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
+{
+ struct ras_err_node *nodea = container_of(a, struct ras_err_node, node);
+ struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node);
+ struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info;
+ struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info;
+
+ if (unlikely(infoa->socket_id != infob->socket_id))
+ return infoa->socket_id - infob->socket_id;
+ else
+ return infoa->die_id - infob->die_id;
+
+ return 0;
+}
+
+static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data,
+ struct amdgpu_smuio_mcm_config_info *mcm_info)
+{
+ struct ras_err_node *err_node;
+
+ err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info);
+ if (err_node)
+ return &err_node->err_info;
+
+ err_node = amdgpu_ras_error_node_new();
+ if (!err_node)
+ return NULL;
+
+ memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));
+
+ err_data->err_list_count++;
+ list_add_tail(&err_node->node, &err_data->err_node_list);
+ list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
+
+ return &err_node->err_info;
+}
+
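+/* The three helpers below accumulate uncorrectable, correctable and
+ * deferred error counts both per MCM (socket/die) entry and in the
+ * err_data totals; the per-MCM entry is created on first use.
+ */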
+int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count)
+{
+ struct ras_err_info *err_info;
+
+ if (!err_data || !mcm_info)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
+ if (!err_info)
+ return -EINVAL;
+
+ err_info->ue_count += count;
+ err_data->ue_count += count;
+
+ return 0;
+}
+
+int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count)
+{
+ struct ras_err_info *err_info;
+
+ if (!err_data || !mcm_info)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
+ if (!err_info)
+ return -EINVAL;
+
+ err_info->ce_count += count;
+ err_data->ce_count += count;
+
+ return 0;
+}
+
+int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count)
+{
+ struct ras_err_info *err_info;
+
+ if (!err_data || !mcm_info)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ err_info = amdgpu_ras_error_get_info(err_data, mcm_info);
+ if (!err_info)
+ return -EINVAL;
+
+ err_info->de_count += count;
+ err_data->de_count += count;
+
+ return 0;
+}
+
+#define mmMP0_SMN_C2PMSG_92 0x1609C
+#define mmMP0_SMN_C2PMSG_126 0x160BE
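+/* Boot-time errors are reported by firmware through the MP0 C2PMSG_92
+ * (status) and C2PMSG_126 (error details) scratch registers, read here via
+ * extended SMN addressing for each instance.
+ */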
+static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev,
+ u32 instance)
+{
+ u32 socket_id, aid_id, hbm_id;
+ u32 fw_status;
+ u32 boot_error;
+ u64 reg_addr;
+
+ /* The pattern for smn addressing in other SOCs could be different
+ * from the one for aqua_vanjaram. We should revisit the code if the
+ * pattern changes. In that case, replace the aqua_vanjaram
+ * implementation with a more common helper */
+ reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
+ aqua_vanjaram_encode_ext_smn_addressing(instance);
+ fw_status = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
+
+ reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
+ aqua_vanjaram_encode_ext_smn_addressing(instance);
+ boot_error = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
+
+ socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error);
+ aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error);
+ hbm_id = ((1 == AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error)) ? 0 : 1);
+
+ if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, memory training failed\n",
+ socket_id, aid_id, hbm_id, fw_status);
+
+ if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, fw_status: 0x%x, firmware load failed at boot time\n",
+ socket_id, aid_id, fw_status);
+
+ if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, fw_status: 0x%x, wafl link training failed\n",
+ socket_id, aid_id, fw_status);
+
+ if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, fw_status: 0x%x, xgmi link training failed\n",
+ socket_id, aid_id, fw_status);
+
+ if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, fw_status: 0x%x, usr cp link training failed\n",
+ socket_id, aid_id, fw_status);
+
+ if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, fw_status: 0x%x, usr dp link training failed\n",
+ socket_id, aid_id, fw_status);
+
+ if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm memory test failed\n",
+ socket_id, aid_id, hbm_id, fw_status);
+
+ if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, hbm: %d, fw_status: 0x%x, hbm bist test failed\n",
+ socket_id, aid_id, hbm_id, fw_status);
+
+ if (AMDGPU_RAS_GPU_ERR_DATA_ABORT(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, fw_status: 0x%x, data abort exception\n",
+ socket_id, aid_id, fw_status);
+
+ if (AMDGPU_RAS_GPU_ERR_GENERIC(boot_error))
+ dev_info(adev->dev,
+ "socket: %d, aid: %d, fw_status: 0x%x, Boot Controller Generic Error\n",
+ socket_id, aid_id, fw_status);
+}
+
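+/* Poll the boot status register until the low byte reports the steady
+ * state value (0xBA) or the polling limit is hit; any other final value is
+ * treated as a boot error for that instance.
+ */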
+static bool amdgpu_ras_boot_error_detected(struct amdgpu_device *adev,
+ u32 instance)
+{
+ u64 reg_addr;
+ u32 reg_data;
+ int retry_loop;
+
+ reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
+ aqua_vanjaram_encode_ext_smn_addressing(instance);
+
+ for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) {
+ reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
+ if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS)
+ return false;
+ else
+ msleep(1);
+ }
+
+ return true;
+}
+
+void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances)
+{
+ u32 i;
+
+ for (i = 0; i < num_instances; i++) {
+ if (amdgpu_ras_boot_error_detected(adev, i))
+ amdgpu_ras_boot_time_error_reporting(adev, i);
+ }
+}
+
+int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ uint64_t start = pfn << AMDGPU_GPU_PAGE_SHIFT;
+ int ret = 0;
+
+ if (amdgpu_ras_check_critical_address(adev, start))
+ return 0;
+
+ mutex_lock(&con->page_rsv_lock);
+ ret = amdgpu_vram_mgr_query_page_status(mgr, start);
+ if (ret == -ENOENT)
+ ret = amdgpu_vram_mgr_reserve_range(mgr, start, AMDGPU_GPU_PAGE_SIZE);
+ mutex_unlock(&con->page_rsv_lock);
+
+ return ret;
+}
+
+void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (RAS_EVENT_ID_IS_VALID(event_id))
+ dev_printk(KERN_INFO, adev->dev, "{%llu}%pV", event_id, &vaf);
+ else
+ dev_printk(KERN_INFO, adev->dev, "%pV", &vaf);
+
+ va_end(args);
+}
+
+bool amdgpu_ras_is_rma(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con)
+ return false;
+
+ return con->is_rma;
+}
+
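+/* Critical regions are VRAM ranges (for example the firmware reserved
+ * memory added in amdgpu_ras_critical_region_init()) that must never be
+ * retired as bad pages; amdgpu_ras_reserve_page() checks them before
+ * reserving anything.
+ */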
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct amdgpu_vram_mgr_resource *vres;
+ struct ras_critical_region *region;
+ struct drm_buddy_block *block;
+ int ret = 0;
+
+ if (!bo || !bo->tbo.resource)
+ return -EINVAL;
+
+ vres = to_amdgpu_vram_mgr_resource(bo->tbo.resource);
+
+ mutex_lock(&con->critical_region_lock);
+
+ /* Check if the bo had been recorded */
+ list_for_each_entry(region, &con->critical_region_head, node)
+ if (region->bo == bo)
+ goto out;
+
+ /* Record new critical amdgpu bo */
+ list_for_each_entry(block, &vres->blocks, link) {
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ region->bo = bo;
+ region->start = amdgpu_vram_mgr_block_start(block);
+ region->size = amdgpu_vram_mgr_block_size(block);
+ list_add_tail(&region->node, &con->critical_region_head);
+ }
+
+out:
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
+
+static void amdgpu_ras_critical_region_init(struct amdgpu_device *adev)
+{
+ amdgpu_ras_add_critical_region(adev, adev->mman.fw_reserved_memory);
+}
+
+static void amdgpu_ras_critical_region_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region, *tmp;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry_safe(region, tmp, &con->critical_region_head, node) {
+ list_del(&region->node);
+ kfree(region);
+ }
+ mutex_unlock(&con->critical_region_lock);
+}
+
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_critical_region *region;
+ bool ret = false;
+
+ mutex_lock(&con->critical_region_lock);
+ list_for_each_entry(region, &con->critical_region_head, node) {
+ if ((region->start <= addr) &&
+ (addr < (region->start + region->size))) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&con->critical_region_lock);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
new file mode 100644
index 000000000000..6cf0dfd38be8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -0,0 +1,1011 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _AMDGPU_RAS_H
+#define _AMDGPU_RAS_H
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/kfifo.h>
+#include <linux/radix-tree.h>
+#include "ta_ras_if.h"
+#include "amdgpu_ras_eeprom.h"
+#include "amdgpu_smuio.h"
+#include "amdgpu_aca.h"
+
+struct amdgpu_iv_entry;
+
+#define AMDGPU_RAS_GPU_ERR_MEM_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 0, 0)
+#define AMDGPU_RAS_GPU_ERR_FW_LOAD(x) AMDGPU_GET_REG_FIELD(x, 1, 1)
+#define AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 2, 2)
+#define AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 3, 3)
+#define AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 4, 4)
+#define AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(x) AMDGPU_GET_REG_FIELD(x, 5, 5)
+#define AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(x) AMDGPU_GET_REG_FIELD(x, 6, 6)
+#define AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(x) AMDGPU_GET_REG_FIELD(x, 7, 7)
+#define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x) AMDGPU_GET_REG_FIELD(x, 10, 8)
+#define AMDGPU_RAS_GPU_ERR_AID_ID(x) AMDGPU_GET_REG_FIELD(x, 12, 11)
+#define AMDGPU_RAS_GPU_ERR_HBM_ID(x) AMDGPU_GET_REG_FIELD(x, 14, 13)
+#define AMDGPU_RAS_GPU_ERR_DATA_ABORT(x) AMDGPU_GET_REG_FIELD(x, 29, 29)
+#define AMDGPU_RAS_GPU_ERR_GENERIC(x) AMDGPU_GET_REG_FIELD(x, 30, 30)
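+/* Decoders for the boot error value read from the MP0 C2PMSG_126 register:
+ * individual failure bits plus the socket/AID/HBM ids of the failing part.
+ */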
+
+#define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT 100
+#define AMDGPU_RAS_BOOT_STEADY_STATUS 0xBA
+#define AMDGPU_RAS_BOOT_STATUS_MASK 0xFF
+
+#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0)
+/* Position of the instance value in sub_block_index of
+ * ta_ras_trigger_error_input; the sub block uses the lower 12 bits
+ */
+#define AMDGPU_RAS_INST_MASK 0xfffff000
+#define AMDGPU_RAS_INST_SHIFT 0xc
+
+#define AMDGPU_RAS_FEATURES_SOCKETID_SHIFT 29
+#define AMDGPU_RAS_FEATURES_SOCKETID_MASK 0xe0000000
+
+/* Reserve 8 physical dram rows for possible retirement.
+ * In the worst case, this loses 8 * 2MB of memory in the vram domain */
+#define AMDGPU_RAS_RESERVED_VRAM_SIZE_DEFAULT (16ULL << 20)
+/* The high three bits indicate the socket id */
+#define AMDGPU_RAS_GET_FEATURES(val) ((val) & ~AMDGPU_RAS_FEATURES_SOCKETID_MASK)
+
+#define RAS_EVENT_INVALID_ID (BIT_ULL(63))
+#define RAS_EVENT_ID_IS_VALID(x) (!((x) & BIT_ULL(63)))
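+/* Event ids are taken from a monotonically increasing 64-bit counter; bit
+ * 63 set marks an id as invalid, so RAS_EVENT_ID_IS_VALID() simply checks
+ * that bit.
+ */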
+
+#define RAS_EVENT_LOG(adev, id, fmt, ...) \
+ amdgpu_ras_event_log_print((adev), (id), (fmt), ##__VA_ARGS__)
+
+#define amdgpu_ras_mark_ras_event(adev, type) \
+ (amdgpu_ras_mark_ras_event_caller((adev), (type), __builtin_return_address(0)))
+
+enum amdgpu_ras_block {
+ AMDGPU_RAS_BLOCK__UMC = 0,
+ AMDGPU_RAS_BLOCK__SDMA,
+ AMDGPU_RAS_BLOCK__GFX,
+ AMDGPU_RAS_BLOCK__MMHUB,
+ AMDGPU_RAS_BLOCK__ATHUB,
+ AMDGPU_RAS_BLOCK__PCIE_BIF,
+ AMDGPU_RAS_BLOCK__HDP,
+ AMDGPU_RAS_BLOCK__XGMI_WAFL,
+ AMDGPU_RAS_BLOCK__DF,
+ AMDGPU_RAS_BLOCK__SMN,
+ AMDGPU_RAS_BLOCK__SEM,
+ AMDGPU_RAS_BLOCK__MP0,
+ AMDGPU_RAS_BLOCK__MP1,
+ AMDGPU_RAS_BLOCK__FUSE,
+ AMDGPU_RAS_BLOCK__MCA,
+ AMDGPU_RAS_BLOCK__VCN,
+ AMDGPU_RAS_BLOCK__JPEG,
+ AMDGPU_RAS_BLOCK__IH,
+ AMDGPU_RAS_BLOCK__MPIO,
+ AMDGPU_RAS_BLOCK__MMSCH,
+
+ AMDGPU_RAS_BLOCK__LAST,
+ AMDGPU_RAS_BLOCK__ANY = -1
+};
+
+enum amdgpu_ras_mca_block {
+ AMDGPU_RAS_MCA_BLOCK__MP0 = 0,
+ AMDGPU_RAS_MCA_BLOCK__MP1,
+ AMDGPU_RAS_MCA_BLOCK__MPIO,
+ AMDGPU_RAS_MCA_BLOCK__IOHC,
+
+ AMDGPU_RAS_MCA_BLOCK__LAST
+};
+
+#define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST
+#define AMDGPU_RAS_MCA_BLOCK_COUNT AMDGPU_RAS_MCA_BLOCK__LAST
+#define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1)
+
+enum amdgpu_ras_gfx_subblock {
+ /* CPC */
+ AMDGPU_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
+ AMDGPU_RAS_BLOCK__GFX_CPC_SCRATCH =
+ AMDGPU_RAS_BLOCK__GFX_CPC_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_CPC_UCODE,
+ AMDGPU_RAS_BLOCK__GFX_DC_STATE_ME1,
+ AMDGPU_RAS_BLOCK__GFX_DC_CSINVOC_ME1,
+ AMDGPU_RAS_BLOCK__GFX_DC_RESTORE_ME1,
+ AMDGPU_RAS_BLOCK__GFX_DC_STATE_ME2,
+ AMDGPU_RAS_BLOCK__GFX_DC_CSINVOC_ME2,
+ AMDGPU_RAS_BLOCK__GFX_DC_RESTORE_ME2,
+ AMDGPU_RAS_BLOCK__GFX_CPC_INDEX_END =
+ AMDGPU_RAS_BLOCK__GFX_DC_RESTORE_ME2,
+ /* CPF */
+ AMDGPU_RAS_BLOCK__GFX_CPF_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_CPF_ROQ_ME2 =
+ AMDGPU_RAS_BLOCK__GFX_CPF_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_CPF_ROQ_ME1,
+ AMDGPU_RAS_BLOCK__GFX_CPF_TAG,
+ AMDGPU_RAS_BLOCK__GFX_CPF_INDEX_END = AMDGPU_RAS_BLOCK__GFX_CPF_TAG,
+ /* CPG */
+ AMDGPU_RAS_BLOCK__GFX_CPG_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_CPG_DMA_ROQ =
+ AMDGPU_RAS_BLOCK__GFX_CPG_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_CPG_DMA_TAG,
+ AMDGPU_RAS_BLOCK__GFX_CPG_TAG,
+ AMDGPU_RAS_BLOCK__GFX_CPG_INDEX_END = AMDGPU_RAS_BLOCK__GFX_CPG_TAG,
+ /* GDS */
+ AMDGPU_RAS_BLOCK__GFX_GDS_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_GDS_MEM = AMDGPU_RAS_BLOCK__GFX_GDS_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_GDS_INPUT_QUEUE,
+ AMDGPU_RAS_BLOCK__GFX_GDS_OA_PHY_CMD_RAM_MEM,
+ AMDGPU_RAS_BLOCK__GFX_GDS_OA_PHY_DATA_RAM_MEM,
+ AMDGPU_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
+ AMDGPU_RAS_BLOCK__GFX_GDS_INDEX_END =
+ AMDGPU_RAS_BLOCK__GFX_GDS_OA_PIPE_MEM,
+ /* SPI */
+ AMDGPU_RAS_BLOCK__GFX_SPI_SR_MEM,
+ /* SQ */
+ AMDGPU_RAS_BLOCK__GFX_SQ_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_SQ_SGPR = AMDGPU_RAS_BLOCK__GFX_SQ_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_SQ_LDS_D,
+ AMDGPU_RAS_BLOCK__GFX_SQ_LDS_I,
+ AMDGPU_RAS_BLOCK__GFX_SQ_VGPR,
+ AMDGPU_RAS_BLOCK__GFX_SQ_INDEX_END = AMDGPU_RAS_BLOCK__GFX_SQ_VGPR,
+ /* SQC (3 ranges) */
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX_START,
+ /* SQC range 0 */
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX0_START =
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_UTCL1_LFIFO =
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX0_START,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU0_WRITE_DATA_BUF,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU0_UTCL1_LFIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU1_WRITE_DATA_BUF,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU1_UTCL1_LFIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU2_WRITE_DATA_BUF,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX0_END =
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_CU2_UTCL1_LFIFO,
+ /* SQC range 1 */
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX1_START,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_TAG_RAM =
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX1_START,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_UTCL1_MISS_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_MISS_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKA_BANK_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_TAG_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_HIT_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_MISS_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_DIRTY_BIT_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX1_END =
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKA_BANK_RAM,
+ /* SQC range 2 */
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_START,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_TAG_RAM =
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_START,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_UTCL1_MISS_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_MISS_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INST_BANKB_BANK_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_TAG_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_HIT_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_MISS_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_DIRTY_BIT_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_END =
+ AMDGPU_RAS_BLOCK__GFX_SQC_DATA_BANKB_BANK_RAM,
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX_END =
+ AMDGPU_RAS_BLOCK__GFX_SQC_INDEX2_END,
+ /* TA */
+ AMDGPU_RAS_BLOCK__GFX_TA_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TA_FS_DFIFO =
+ AMDGPU_RAS_BLOCK__GFX_TA_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TA_FS_AFIFO,
+ AMDGPU_RAS_BLOCK__GFX_TA_FL_LFIFO,
+ AMDGPU_RAS_BLOCK__GFX_TA_FX_LFIFO,
+ AMDGPU_RAS_BLOCK__GFX_TA_FS_CFIFO,
+ AMDGPU_RAS_BLOCK__GFX_TA_INDEX_END = AMDGPU_RAS_BLOCK__GFX_TA_FS_CFIFO,
+ /* TCA */
+ AMDGPU_RAS_BLOCK__GFX_TCA_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TCA_HOLE_FIFO =
+ AMDGPU_RAS_BLOCK__GFX_TCA_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TCA_REQ_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_TCA_INDEX_END =
+ AMDGPU_RAS_BLOCK__GFX_TCA_REQ_FIFO,
+ /* TCC (5 sub-ranges) */
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX_START,
+ /* TCC range 0 */
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX0_START =
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA =
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX0_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_0_1,
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_0,
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DATA_BANK_1_1,
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_0,
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_DIRTY_BANK_1,
+ AMDGPU_RAS_BLOCK__GFX_TCC_HIGH_RATE_TAG,
+ AMDGPU_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX0_END =
+ AMDGPU_RAS_BLOCK__GFX_TCC_LOW_RATE_TAG,
+ /* TCC range 1 */
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX1_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_IN_USE_DEC =
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX1_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX1_END =
+ AMDGPU_RAS_BLOCK__GFX_TCC_IN_USE_TRANSFER,
+ /* TCC range 2 */
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX2_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_RETURN_DATA =
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX2_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_RETURN_CONTROL,
+ AMDGPU_RAS_BLOCK__GFX_TCC_UC_ATOMIC_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_TCC_WRITE_RETURN,
+ AMDGPU_RAS_BLOCK__GFX_TCC_WRITE_CACHE_READ,
+ AMDGPU_RAS_BLOCK__GFX_TCC_SRC_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_TCC_SRC_FIFO_NEXT_RAM,
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX2_END =
+ AMDGPU_RAS_BLOCK__GFX_TCC_CACHE_TAG_PROBE_FIFO,
+ /* TCC range 3 */
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX3_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_LATENCY_FIFO =
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX3_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX3_END =
+ AMDGPU_RAS_BLOCK__GFX_TCC_LATENCY_FIFO_NEXT_RAM,
+ /* TCC range 4 */
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_WRRET_TAG_WRITE_RETURN =
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_START,
+ AMDGPU_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_END =
+ AMDGPU_RAS_BLOCK__GFX_TCC_ATOMIC_RETURN_BUFFER,
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX_END =
+ AMDGPU_RAS_BLOCK__GFX_TCC_INDEX4_END,
+ /* TCI */
+ AMDGPU_RAS_BLOCK__GFX_TCI_WRITE_RAM,
+ /* TCP */
+ AMDGPU_RAS_BLOCK__GFX_TCP_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TCP_CACHE_RAM =
+ AMDGPU_RAS_BLOCK__GFX_TCP_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TCP_LFIFO_RAM,
+ AMDGPU_RAS_BLOCK__GFX_TCP_CMD_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_TCP_VM_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_TCP_DB_RAM,
+ AMDGPU_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO0,
+ AMDGPU_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
+ AMDGPU_RAS_BLOCK__GFX_TCP_INDEX_END =
+ AMDGPU_RAS_BLOCK__GFX_TCP_UTCL1_LFIFO1,
+ /* TD */
+ AMDGPU_RAS_BLOCK__GFX_TD_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TD_SS_FIFO_LO =
+ AMDGPU_RAS_BLOCK__GFX_TD_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_TD_SS_FIFO_HI,
+ AMDGPU_RAS_BLOCK__GFX_TD_CS_FIFO,
+ AMDGPU_RAS_BLOCK__GFX_TD_INDEX_END = AMDGPU_RAS_BLOCK__GFX_TD_CS_FIFO,
+ /* EA (3 sub-ranges) */
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX_START,
+ /* EA range 0 */
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX0_START =
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX_START,
+ AMDGPU_RAS_BLOCK__GFX_EA_DRAMRD_CMDMEM =
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX0_START,
+ AMDGPU_RAS_BLOCK__GFX_EA_DRAMWR_CMDMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_DRAMWR_DATAMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_RRET_TAGMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_WRET_TAGMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_GMIRD_CMDMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_CMDMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX0_END =
+ AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_DATAMEM,
+ /* EA range 1 */
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX1_START,
+ AMDGPU_RAS_BLOCK__GFX_EA_DRAMRD_PAGEMEM =
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX1_START,
+ AMDGPU_RAS_BLOCK__GFX_EA_DRAMWR_PAGEMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_IORD_CMDMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_IOWR_CMDMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_IOWR_DATAMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_GMIRD_PAGEMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX1_END =
+ AMDGPU_RAS_BLOCK__GFX_EA_GMIWR_PAGEMEM,
+ /* EA range 2 */
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_START,
+ AMDGPU_RAS_BLOCK__GFX_EA_MAM_D0MEM =
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_START,
+ AMDGPU_RAS_BLOCK__GFX_EA_MAM_D1MEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_MAM_D2MEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_MAM_D3MEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_END =
+ AMDGPU_RAS_BLOCK__GFX_EA_MAM_D3MEM,
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX_END =
+ AMDGPU_RAS_BLOCK__GFX_EA_INDEX2_END,
+ /* UTC VM L2 bank */
+ AMDGPU_RAS_BLOCK__UTC_VML2_BANK_CACHE,
+ /* UTC VM walker */
+ AMDGPU_RAS_BLOCK__UTC_VML2_WALKER,
+ /* UTC ATC L2 2MB cache */
+ AMDGPU_RAS_BLOCK__UTC_ATCL2_CACHE_2M_BANK,
+ /* UTC ATC L2 4KB cache */
+ AMDGPU_RAS_BLOCK__UTC_ATCL2_CACHE_4K_BANK,
+ AMDGPU_RAS_BLOCK__GFX_MAX
+};
+
+enum amdgpu_ras_error_type {
+ AMDGPU_RAS_ERROR__NONE = 0,
+ AMDGPU_RAS_ERROR__PARITY = 1,
+ AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE = 2,
+ AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE = 4,
+ AMDGPU_RAS_ERROR__POISON = 8,
+};
+
+enum amdgpu_ras_ret {
+ AMDGPU_RAS_SUCCESS = 0,
+ AMDGPU_RAS_FAIL,
+ AMDGPU_RAS_UE,
+ AMDGPU_RAS_CE,
+ AMDGPU_RAS_PT,
+};
+
+enum amdgpu_ras_error_query_mode {
+ AMDGPU_RAS_INVALID_ERROR_QUERY = 0,
+ AMDGPU_RAS_DIRECT_ERROR_QUERY = 1,
+ AMDGPU_RAS_FIRMWARE_ERROR_QUERY = 2,
+ AMDGPU_RAS_VIRT_ERROR_COUNT_QUERY = 3,
+};
+
+/* ras error status register fields */
+#define ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT 0x0
+#define ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK 0x00000001L
+#define ERR_STATUS_LO__MEMORY_ID__SHIFT 0x18
+#define ERR_STATUS_LO__MEMORY_ID_MASK 0xFF000000L
+#define ERR_STATUS_HI__ERR_INFO_VALID_FLAG__SHIFT 0x2
+#define ERR_STATUS_HI__ERR_INFO_VALID_FLAG_MASK 0x00000004L
+#define ERR_STATUS__ERR_CNT__SHIFT 0x17
+#define ERR_STATUS__ERR_CNT_MASK 0x03800000L
+
+#define AMDGPU_RAS_REG_ENTRY(ip, inst, reg_lo, reg_hi) \
+ ip##_HWIP, inst, reg_lo##_BASE_IDX, reg_lo, reg_hi##_BASE_IDX, reg_hi
+
+#define AMDGPU_RAS_REG_ENTRY_OFFSET(hwip, ip_inst, segment, reg) \
+ (adev->reg_offset[hwip][ip_inst][segment] + (reg))
+
+#define AMDGPU_RAS_ERR_INFO_VALID (1 << 0)
+#define AMDGPU_RAS_ERR_STATUS_VALID (1 << 1)
+#define AMDGPU_RAS_ERR_ADDRESS_VALID (1 << 2)
+
+#define AMDGPU_RAS_GPU_RESET_MODE2_RESET (0x1 << 0)
+#define AMDGPU_RAS_GPU_RESET_MODE1_RESET (0x1 << 1)
+
+struct amdgpu_ras_err_status_reg_entry {
+ uint32_t hwip;
+ uint32_t ip_inst;
+ uint32_t seg_lo;
+ uint32_t reg_lo;
+ uint32_t seg_hi;
+ uint32_t reg_hi;
+ uint32_t reg_inst;
+ uint32_t flags;
+ const char *block_name;
+};
+
+struct amdgpu_ras_memory_id_entry {
+ uint32_t memory_id;
+ const char *name;
+};
+
+struct ras_common_if {
+ enum amdgpu_ras_block block;
+ enum amdgpu_ras_error_type type;
+ uint32_t sub_block_index;
+ char name[32];
+};
+
+#define MAX_UMC_CHANNEL_NUM 32
+
+struct ecc_info_per_ch {
+ uint16_t ce_count_lo_chip;
+ uint16_t ce_count_hi_chip;
+ uint64_t mca_umc_status;
+ uint64_t mca_umc_addr;
+ uint64_t mca_ceumc_addr;
+};
+
+struct umc_ecc_info {
+ struct ecc_info_per_ch ecc[MAX_UMC_CHANNEL_NUM];
+
+ /* Indicates whether the smu ecctable supports
+ * recording the correctable error address
+ */
+ int record_ce_addr_supported;
+};
+
+enum ras_event_type {
+ RAS_EVENT_TYPE_INVALID = 0,
+ RAS_EVENT_TYPE_FATAL,
+ RAS_EVENT_TYPE_POISON_CREATION,
+ RAS_EVENT_TYPE_POISON_CONSUMPTION,
+ RAS_EVENT_TYPE_COUNT,
+};
+
+struct ras_event_state {
+ u64 last_seqno;
+ atomic64_t count;
+};
+
+struct ras_event_manager {
+ atomic64_t seqno;
+ struct ras_event_state event_state[RAS_EVENT_TYPE_COUNT];
+};
+
+struct ras_event_id {
+ enum ras_event_type type;
+ u64 event_id;
+};
+
+struct ras_query_context {
+ struct ras_event_id evid;
+};
+
+typedef int (*pasid_notify)(struct amdgpu_device *adev,
+ uint16_t pasid, void *data);
+
+struct ras_poison_msg {
+ enum amdgpu_ras_block block;
+ uint16_t pasid;
+ uint32_t reset;
+ pasid_notify pasid_fn;
+ void *data;
+};
+
+struct ras_err_pages {
+ uint32_t count;
+ uint64_t *pfn;
+};
+
+struct ras_ecc_err {
+ uint64_t status;
+ uint64_t ipid;
+ uint64_t addr;
+ uint64_t pa_pfn;
+ /* save global channel index across all UMC instances */
+ uint32_t channel_idx;
+ struct ras_err_pages err_pages;
+};
+
+struct ras_ecc_log_info {
+ struct mutex lock;
+ struct radix_tree_root de_page_tree;
+ uint64_t de_queried_count;
+ uint64_t consumption_q_count;
+};
+
+struct ras_critical_region {
+ struct list_head node;
+ struct amdgpu_bo *bo;
+ uint64_t start;
+ uint64_t size;
+};
+
+struct amdgpu_ras {
+ /* ras infrastructure */
+ /* for ras itself. */
+ uint32_t features;
+ uint32_t schema;
+ struct list_head head;
+ /* sysfs */
+ struct device_attribute features_attr;
+ struct device_attribute version_attr;
+ struct device_attribute schema_attr;
+ struct device_attribute event_state_attr;
+ struct bin_attribute badpages_attr;
+ struct dentry *de_ras_eeprom_table;
+ /* block array */
+ struct ras_manager *objs;
+
+ /* gpu recovery */
+ struct work_struct recovery_work;
+ atomic_t in_recovery;
+ atomic_t rma_in_recovery;
+ struct amdgpu_device *adev;
+ /* error handler data */
+ struct ras_err_handler_data *eh_data;
+ struct mutex recovery_lock;
+
+ uint32_t flags;
+ bool reboot;
+ struct amdgpu_ras_eeprom_control eeprom_control;
+
+ bool error_query_ready;
+
+ /* bad page count threshold */
+ uint32_t bad_page_cnt_threshold;
+
+ /* disable ras error count harvest in recovery */
+ bool disable_ras_err_cnt_harvest;
+
+ /* is poison mode supported */
+ bool poison_supported;
+
+ /* RAS count errors delayed work */
+ struct delayed_work ras_counte_delay_work;
+ atomic_t ras_ue_count;
+ atomic_t ras_ce_count;
+
+ /* record umc error info queried from smu */
+ struct umc_ecc_info umc_ecc;
+
+ /* Indicates whether the smu needs to update bad channel info */
+ bool update_channel_flag;
+ /* Record status of smu mca debug mode */
+ bool is_aca_debug_mode;
+ bool is_rma;
+
+ /* Record special requirements of gpu reset caller */
+ uint32_t gpu_reset_flags;
+
+ struct task_struct *page_retirement_thread;
+ wait_queue_head_t page_retirement_wq;
+ struct mutex page_retirement_lock;
+ atomic_t page_retirement_req_cnt;
+ atomic_t poison_creation_count;
+ atomic_t poison_consumption_count;
+ struct mutex page_rsv_lock;
+ DECLARE_KFIFO(poison_fifo, struct ras_poison_msg, 128);
+ struct ras_ecc_log_info umc_ecc_log;
+ struct delayed_work page_retirement_dwork;
+
+ /* ras errors detected */
+ unsigned long ras_err_state;
+
+ /* RAS event manager */
+ struct ras_event_manager __event_mgr;
+ struct ras_event_manager *event_mgr;
+
+ uint64_t reserved_pages_in_bytes;
+
+ pid_t init_task_pid;
+ char init_task_comm[TASK_COMM_LEN];
+
+ int bad_page_num;
+
+ struct list_head critical_region_head;
+ struct mutex critical_region_lock;
+
+ /* Protect poison injection */
+ struct mutex poison_lock;
+};
+
+struct ras_fs_data {
+ char sysfs_name[48];
+ char debugfs_name[32];
+};
+
+struct ras_err_info {
+ struct amdgpu_smuio_mcm_config_info mcm_info;
+ u64 ce_count;
+ u64 ue_count;
+ u64 de_count;
+};
+
+struct ras_err_node {
+ struct list_head node;
+ struct ras_err_info err_info;
+};
+
+struct ras_err_data {
+ unsigned long ue_count;
+ unsigned long ce_count;
+ unsigned long de_count;
+ unsigned long err_addr_cnt;
+ struct eeprom_table_record *err_addr;
+ unsigned long err_addr_len;
+ u32 err_list_count;
+ struct list_head err_node_list;
+};
+
+#define for_each_ras_error(err_node, err_data) \
+ list_for_each_entry(err_node, &(err_data)->err_node_list, node)
+
+struct ras_err_handler_data {
+ /* point to bad page records array */
+ struct eeprom_table_record *bps;
+ /* the count of entries */
+ int count;
+ int count_saved;
+ /* the space can place new entries */
+ int space_left;
+};
+
+typedef int (*ras_ih_cb)(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+
+struct ras_ih_data {
+ /* interrupt bottom half */
+ struct work_struct ih_work;
+ int inuse;
+ /* IP callback */
+ ras_ih_cb cb;
+ /* full of entries */
+ unsigned char *ring;
+ unsigned int ring_size;
+ unsigned int element_size;
+ unsigned int aligned_element_size;
+ unsigned int rptr;
+ unsigned int wptr;
+};
+
+struct ras_manager {
+ struct ras_common_if head;
+ /* reference count */
+ int use;
+ /* ras block link */
+ struct list_head node;
+ /* the device */
+ struct amdgpu_device *adev;
+ /* sysfs */
+ struct device_attribute sysfs_attr;
+ int attr_inuse;
+
+ /* fs node name */
+ struct ras_fs_data fs_data;
+
+ /* IH data */
+ struct ras_ih_data ih_data;
+
+ struct ras_err_data err_data;
+
+ struct aca_handle aca_handle;
+};
+
+struct ras_badpage {
+ unsigned int bp;
+ unsigned int size;
+ unsigned int flags;
+};
+
+/* interfaces for IP */
+struct ras_fs_if {
+ struct ras_common_if head;
+ const char *sysfs_name;
+ char debugfs_name[32];
+};
+
+struct ras_query_if {
+ struct ras_common_if head;
+ unsigned long ue_count;
+ unsigned long ce_count;
+ unsigned long de_count;
+};
+
+struct ras_inject_if {
+ struct ras_common_if head;
+ uint64_t address;
+ uint64_t value;
+ uint32_t instance_mask;
+};
+
+struct ras_cure_if {
+ struct ras_common_if head;
+ uint64_t address;
+};
+
+struct ras_ih_if {
+ struct ras_common_if head;
+ ras_ih_cb cb;
+};
+
+struct ras_dispatch_if {
+ struct ras_common_if head;
+ struct amdgpu_iv_entry *entry;
+};
+
+struct ras_debug_if {
+ union {
+ struct ras_common_if head;
+ struct ras_inject_if inject;
+ };
+ int op;
+};
+
+struct amdgpu_ras_block_object {
+ struct ras_common_if ras_comm;
+
+ int (*ras_block_match)(struct amdgpu_ras_block_object *block_obj,
+ enum amdgpu_ras_block block, uint32_t sub_block_index);
+ int (*ras_late_init)(struct amdgpu_device *adev, struct ras_common_if *ras_block);
+ void (*ras_fini)(struct amdgpu_device *adev, struct ras_common_if *ras_block);
+ ras_ih_cb ras_cb;
+ const struct amdgpu_ras_block_hw_ops *hw_ops;
+};
+
+struct amdgpu_ras_block_hw_ops {
+ int (*ras_error_inject)(struct amdgpu_device *adev,
+ void *inject_if, uint32_t instance_mask);
+ void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status);
+ void (*query_ras_error_status)(struct amdgpu_device *adev);
+ void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
+ void (*reset_ras_error_status)(struct amdgpu_device *adev);
+ bool (*query_poison_status)(struct amdgpu_device *adev);
+ bool (*handle_poison_consumption)(struct amdgpu_device *adev);
+};
+
+/* work flow
+ * vbios
+ * 1: ras feature enable (enabled by default)
+ * psp
+ * 2: ras framework init (in ip_init)
+ * IP
+ * 3: IH add
+ * 4: debugfs/sysfs create
+ * 5: query/inject
+ * 6: debugfs/sysfs remove
+ * 7: IH remove
+ * 8: feature disable
+ */
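+
+/* The steps above map roughly onto the interfaces declared below, e.g.
+ * 3: amdgpu_ras_interrupt_add_handler()
+ * 4: amdgpu_ras_sysfs_create() / amdgpu_ras_debugfs_create_all()
+ * 5: amdgpu_ras_query_error_status() / amdgpu_ras_error_inject()
+ * 6: amdgpu_ras_sysfs_remove()
+ * 7: amdgpu_ras_interrupt_remove_handler()
+ * 8: amdgpu_ras_feature_enable()
+ */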
+
+int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev);
+int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info);
+
+void amdgpu_ras_resume(struct amdgpu_device *adev);
+void amdgpu_ras_suspend(struct amdgpu_device *adev);
+
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ unsigned long *ce_count,
+ unsigned long *ue_count,
+ struct ras_query_if *query_info);
+
+/* error handling functions */
+int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, int pages, bool from_rom);
+
+int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev,
+ unsigned long *new_cnt);
+
+static inline enum ta_ras_block
+amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
+ switch (block) {
+ case AMDGPU_RAS_BLOCK__UMC:
+ return TA_RAS_BLOCK__UMC;
+ case AMDGPU_RAS_BLOCK__SDMA:
+ return TA_RAS_BLOCK__SDMA;
+ case AMDGPU_RAS_BLOCK__GFX:
+ return TA_RAS_BLOCK__GFX;
+ case AMDGPU_RAS_BLOCK__MMHUB:
+ return TA_RAS_BLOCK__MMHUB;
+ case AMDGPU_RAS_BLOCK__ATHUB:
+ return TA_RAS_BLOCK__ATHUB;
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
+ return TA_RAS_BLOCK__PCIE_BIF;
+ case AMDGPU_RAS_BLOCK__HDP:
+ return TA_RAS_BLOCK__HDP;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ return TA_RAS_BLOCK__XGMI_WAFL;
+ case AMDGPU_RAS_BLOCK__DF:
+ return TA_RAS_BLOCK__DF;
+ case AMDGPU_RAS_BLOCK__SMN:
+ return TA_RAS_BLOCK__SMN;
+ case AMDGPU_RAS_BLOCK__SEM:
+ return TA_RAS_BLOCK__SEM;
+ case AMDGPU_RAS_BLOCK__MP0:
+ return TA_RAS_BLOCK__MP0;
+ case AMDGPU_RAS_BLOCK__MP1:
+ return TA_RAS_BLOCK__MP1;
+ case AMDGPU_RAS_BLOCK__FUSE:
+ return TA_RAS_BLOCK__FUSE;
+ case AMDGPU_RAS_BLOCK__MCA:
+ return TA_RAS_BLOCK__MCA;
+ case AMDGPU_RAS_BLOCK__VCN:
+ return TA_RAS_BLOCK__VCN;
+ case AMDGPU_RAS_BLOCK__JPEG:
+ return TA_RAS_BLOCK__JPEG;
+ case AMDGPU_RAS_BLOCK__IH:
+ return TA_RAS_BLOCK__IH;
+ case AMDGPU_RAS_BLOCK__MPIO:
+ return TA_RAS_BLOCK__MPIO;
+ case AMDGPU_RAS_BLOCK__MMSCH:
+ return TA_RAS_BLOCK__MMSCH;
+ default:
+ WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block);
+ return TA_RAS_BLOCK__UMC;
+ }
+}
+
+static inline enum ta_ras_error_type
+amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) {
+ switch (error) {
+ case AMDGPU_RAS_ERROR__NONE:
+ return TA_RAS_ERROR__NONE;
+ case AMDGPU_RAS_ERROR__PARITY:
+ return TA_RAS_ERROR__PARITY;
+ case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
+ return TA_RAS_ERROR__SINGLE_CORRECTABLE;
+ case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
+ return TA_RAS_ERROR__MULTI_UNCORRECTABLE;
+ case AMDGPU_RAS_ERROR__POISON:
+ return TA_RAS_ERROR__POISON;
+ default:
+ WARN_ONCE(1, "RAS ERROR: unexpected error type %d\n", error);
+ return TA_RAS_ERROR__NONE;
+ }
+}
+
+/* called in ip_init and ip_fini */
+int amdgpu_ras_init(struct amdgpu_device *adev);
+int amdgpu_ras_late_init(struct amdgpu_device *adev);
+int amdgpu_ras_fini(struct amdgpu_device *adev);
+int amdgpu_ras_pre_fini(struct amdgpu_device *adev);
+
+int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block);
+
+void amdgpu_ras_block_late_fini(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block);
+
+int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable);
+
+int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable);
+
+int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
+int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
+void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
+
+int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
+ struct ras_query_if *info);
+
+int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
+int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
+
+int amdgpu_ras_error_inject(struct amdgpu_device *adev,
+ struct ras_inject_if *info);
+
+int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
+int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
+int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
+ struct ras_dispatch_if *info);
+
+struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
+extern atomic_t amdgpu_ras_in_intr;
+
+static inline bool amdgpu_ras_intr_triggered(void)
+{
+ return !!atomic_read(&amdgpu_ras_in_intr);
+}
+
+static inline void amdgpu_ras_intr_cleared(void)
+{
+ atomic_set(&amdgpu_ras_in_intr, 0);
+}
+
+void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
+
+void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready);
+
+bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev);
+
+void amdgpu_release_ras_context(struct amdgpu_device *adev);
+
+int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev);
+
+const char *get_ras_block_str(struct ras_common_if *ras_block);
+
+bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev);
+
+int amdgpu_ras_is_supported(struct amdgpu_device *adev, unsigned int block);
+
+int amdgpu_ras_reset_gpu(struct amdgpu_device *adev);
+
+struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev);
+
+int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con);
+
+int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable);
+int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable);
+bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev);
+bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
+ unsigned int *mode);
+
+int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
+ struct amdgpu_ras_block_object *ras_block_obj);
+void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev);
+void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name);
+bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_entry,
+ uint32_t instance,
+ uint32_t *memory_id);
+bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_entry,
+ uint32_t instance,
+ unsigned long *err_cnt);
+void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_list,
+ uint32_t reg_list_size,
+ const struct amdgpu_ras_memory_id_entry *mem_list,
+ uint32_t mem_list_size,
+ uint32_t instance,
+ uint32_t err_type,
+ unsigned long *err_count);
+void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev,
+ const struct amdgpu_ras_err_status_reg_entry *reg_list,
+ uint32_t reg_list_size,
+ uint32_t instance);
+
+int amdgpu_ras_error_data_init(struct ras_err_data *err_data);
+void amdgpu_ras_error_data_fini(struct ras_err_data *err_data);
+int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data,
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count);
+int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data,
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count);
+int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data,
+ struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 count);
+void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances);
+int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+ const struct aca_info *aca_info, void *data);
+int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk);
+
+ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr,
+ struct aca_handle *handle, char *buf, void *data);
+
+void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status);
+bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev);
+void amdgpu_ras_set_err_poison(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
+void amdgpu_ras_clear_err_state(struct amdgpu_device *adev);
+bool amdgpu_ras_is_err_state(struct amdgpu_device *adev, int block);
+
+u64 amdgpu_ras_acquire_event_id(struct amdgpu_device *adev, enum ras_event_type type);
+int amdgpu_ras_mark_ras_event_caller(struct amdgpu_device *adev, enum ras_event_type type,
+ const void *caller);
+
+int amdgpu_ras_reserve_page(struct amdgpu_device *adev, uint64_t pfn);
+
+int amdgpu_ras_add_critical_region(struct amdgpu_device *adev, struct amdgpu_bo *bo);
+bool amdgpu_ras_check_critical_address(struct amdgpu_device *adev, uint64_t addr);
+
+int amdgpu_ras_put_poison_req(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset);
+
+bool amdgpu_ras_in_recovery(struct amdgpu_device *adev);
+
+__printf(3, 4)
+void amdgpu_ras_event_log_print(struct amdgpu_device *adev, u64 event_id,
+ const char *fmt, ...);
+
+bool amdgpu_ras_is_rma(struct amdgpu_device *adev);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
new file mode 100644
index 000000000000..3eb3fb55ccb0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -0,0 +1,1564 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_ras_eeprom.h"
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include <linux/bits.h>
+#include "atom.h"
+#include "amdgpu_eeprom.h"
+#include "amdgpu_atomfirmware.h"
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
+#include "amdgpu_reset.h"
+
+/* These are memory addresses as would be seen by one or more EEPROM
+ * chips strung on the I2C bus, usually by manipulating pins 1-3 of a
+ * set of EEPROM devices. They form a continuous memory space.
+ *
+ * The I2C device address includes the device type identifier, 1010b,
+ * which is a reserved value and indicates that this is an I2C EEPROM
+ * device. It also includes the top 3 bits of the 19 bit EEPROM memory
+ * address, namely bits 18, 17, and 16. This makes up the 7 bit
+ * address sent on the I2C bus with bit 0 being the direction bit,
+ * which is not represented here, and sent by the hardware directly.
+ *
+ * For instance,
+ * 50h = 1010000b => device type identifier 1010b, bits 18:16 = 000b, address 0.
+ * 54h = 1010100b => --"--, bits 18:16 = 100b, address 40000h.
+ * 56h = 1010110b => --"--, bits 18:16 = 110b, address 60000h.
+ * Depending on the size of the I2C EEPROM device(s), bits 18:16 may
+ * address memory in a device or a device on the I2C bus, depending on
+ * the status of pins 1-3. See top of amdgpu_eeprom.c.
+ *
+ * The RAS table lives either at address 0 or address 40000h of EEPROM.
+ */
+#define EEPROM_I2C_MADDR_0 0x0
+#define EEPROM_I2C_MADDR_4 0x40000
+
+/*
+ * The 2 macros below represent the actual size in bytes that
+ * those entities occupy in the EEPROM memory.
+ * RAS_TABLE_RECORD_SIZE differs from sizeof(eeprom_table_record), which
+ * uses uint64 to store 6-byte fields such as retired_page.
+ */
+#define RAS_TABLE_HEADER_SIZE 20
+#define RAS_TABLE_RECORD_SIZE 24
+
+/* Table hdr is 'AMDR' */
+#define RAS_TABLE_HDR_VAL 0x414d4452
+
+/* Bad GPU tag 'BADG' */
+#define RAS_TABLE_HDR_BAD 0x42414447
+
+/*
+ * EEPROM Table structure v1
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE HEADER |
+ * | ( size 20 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | BAD PAGE RECORD AREA |
+ * | |
+ * ---------------------------------
+ */
+
+/* Assume 2-Mbit size EEPROM and take up the whole space. */
+#define RAS_TBL_SIZE_BYTES (256 * 1024)
+#define RAS_TABLE_START 0
+#define RAS_HDR_START RAS_TABLE_START
+#define RAS_RECORD_START (RAS_HDR_START + RAS_TABLE_HEADER_SIZE)
+#define RAS_MAX_RECORD_COUNT ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE) \
+ / RAS_TABLE_RECORD_SIZE)
+
+/*
+ * EEPROM Table structure v2.1
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE HEADER |
+ * | ( size 20 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | EEPROM TABLE RAS INFO |
+ * | (available info size 4 Bytes) |
+ * | ( reserved size 252 Bytes ) |
+ * | |
+ * ---------------------------------
+ * | |
+ * | BAD PAGE RECORD AREA |
+ * | |
+ * ---------------------------------
+ */
+
+/* EEPROM Table V2_1 */
+#define RAS_TABLE_V2_1_INFO_SIZE 256
+#define RAS_TABLE_V2_1_INFO_START RAS_TABLE_HEADER_SIZE
+#define RAS_RECORD_START_V2_1 (RAS_HDR_START + RAS_TABLE_HEADER_SIZE + \
+ RAS_TABLE_V2_1_INFO_SIZE)
+#define RAS_MAX_RECORD_COUNT_V2_1 ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE - \
+ RAS_TABLE_V2_1_INFO_SIZE) \
+ / RAS_TABLE_RECORD_SIZE)
+
+/* Given a zero-based index of an EEPROM RAS record, yields the EEPROM
+ * offset off of RAS_TABLE_START. That is, this is something you can
+ * add to control->i2c_address, and then tell I2C layer to read
+ * from/write to there. _N is the so called absolute index,
+ * because it starts right after the table header.
+ */
+#define RAS_INDEX_TO_OFFSET(_C, _N) ((_C)->ras_record_offset + \
+ (_N) * RAS_TABLE_RECORD_SIZE)
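+/* For example, for a v1 table, where ras_record_offset is RAS_RECORD_START (20),
+ * absolute index 3 maps to EEPROM offset 20 + 3 * 24 = 92.
+ */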
+
+#define RAS_OFFSET_TO_INDEX(_C, _O) (((_O) - \
+ (_C)->ras_record_offset) / RAS_TABLE_RECORD_SIZE)
+
+/* Given a 0-based relative record index, 0, 1, 2, ..., etc., off
+ * of "fri", return the absolute record index off of the end of
+ * the table header.
+ */
+#define RAS_RI_TO_AI(_C, _I) (((_I) + (_C)->ras_fri) % \
+ (_C)->ras_max_record_count)
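+/* For example, with ras_fri = 10 and ras_max_record_count = 12, relative
+ * index 5 maps to absolute index (5 + 10) % 12 = 3.
+ */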
+
+#define RAS_NUM_RECS(_tbl_hdr) (((_tbl_hdr)->tbl_size - \
+ RAS_TABLE_HEADER_SIZE) / RAS_TABLE_RECORD_SIZE)
+
+#define RAS_NUM_RECS_V2_1(_tbl_hdr) (((_tbl_hdr)->tbl_size - \
+ RAS_TABLE_HEADER_SIZE - \
+ RAS_TABLE_V2_1_INFO_SIZE) / RAS_TABLE_RECORD_SIZE)
+
+#define to_amdgpu_device(x) ((container_of(x, struct amdgpu_ras, eeprom_control))->adev)
+
+static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(11, 0, 2): /* VEGA20 and ARCTURUS */
+ case IP_VERSION(11, 0, 7): /* Sienna cichlid */
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 2): /* Aldebaran */
+ case IP_VERSION(13, 0, 10):
+ return true;
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ return !adev->gmc.is_app_apu;
+ default:
+ return false;
+ }
+}
+
+static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
+ struct amdgpu_ras_eeprom_control *control)
+{
+ struct atom_context *atom_ctx = adev->mode_info.atom_context;
+ u8 i2c_addr;
+
+ if (!control)
+ return false;
+
+ if (adev->bios && amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
+ /* The address given by VBIOS is an 8-bit, wire-format
+ * address, i.e. the most significant byte.
+ *
+ * Normalize it to a 19-bit EEPROM address. Remove the
+ * device type identifier and make it a 7-bit address;
+ * then make it a 19-bit EEPROM address. See top of
+ * amdgpu_eeprom.c.
+ */
+ i2c_addr = (i2c_addr & 0x0F) >> 1;
+ control->i2c_address = ((u32) i2c_addr) << 16;
+
+ return true;
+ }
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(11, 0, 2):
+ /* VEGA20 and ARCTURUS */
+ if (adev->asic_type == CHIP_VEGA20)
+ control->i2c_address = EEPROM_I2C_MADDR_0;
+ else if (strnstr(atom_ctx->vbios_pn,
+ "D342",
+ sizeof(atom_ctx->vbios_pn)))
+ control->i2c_address = EEPROM_I2C_MADDR_0;
+ else
+ control->i2c_address = EEPROM_I2C_MADDR_4;
+ return true;
+ case IP_VERSION(11, 0, 7):
+ control->i2c_address = EEPROM_I2C_MADDR_0;
+ return true;
+ case IP_VERSION(13, 0, 2):
+ if (strnstr(atom_ctx->vbios_pn, "D673",
+ sizeof(atom_ctx->vbios_pn)))
+ control->i2c_address = EEPROM_I2C_MADDR_4;
+ else
+ control->i2c_address = EEPROM_I2C_MADDR_0;
+ return true;
+ case IP_VERSION(13, 0, 0):
+ if (strnstr(atom_ctx->vbios_pn, "D707",
+ sizeof(atom_ctx->vbios_pn)))
+ control->i2c_address = EEPROM_I2C_MADDR_0;
+ else
+ control->i2c_address = EEPROM_I2C_MADDR_4;
+ return true;
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 10):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ control->i2c_address = EEPROM_I2C_MADDR_4;
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void
+__encode_table_header_to_buf(struct amdgpu_ras_eeprom_table_header *hdr,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+
+ pp[0] = cpu_to_le32(hdr->header);
+ pp[1] = cpu_to_le32(hdr->version);
+ pp[2] = cpu_to_le32(hdr->first_rec_offset);
+ pp[3] = cpu_to_le32(hdr->tbl_size);
+ pp[4] = cpu_to_le32(hdr->checksum);
+}
+
+static void
+__decode_table_header_from_buf(struct amdgpu_ras_eeprom_table_header *hdr,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+
+ hdr->header = le32_to_cpu(pp[0]);
+ hdr->version = le32_to_cpu(pp[1]);
+ hdr->first_rec_offset = le32_to_cpu(pp[2]);
+ hdr->tbl_size = le32_to_cpu(pp[3]);
+ hdr->checksum = le32_to_cpu(pp[4]);
+}
+
+static int __write_table_header(struct amdgpu_ras_eeprom_control *control)
+{
+ u8 buf[RAS_TABLE_HEADER_SIZE];
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ int res;
+
+ memset(buf, 0, sizeof(buf));
+ __encode_table_header_to_buf(&control->tbl_hdr, buf);
+
+ /* i2c may be unstable in gpu reset */
+ down_read(&adev->reset_domain->sem);
+ res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus,
+ control->i2c_address +
+ control->ras_header_offset,
+ buf, RAS_TABLE_HEADER_SIZE);
+ up_read(&adev->reset_domain->sem);
+
+ if (res < 0) {
+ dev_err(adev->dev, "Failed to write EEPROM table header:%d",
+ res);
+ } else if (res < RAS_TABLE_HEADER_SIZE) {
+ dev_err(adev->dev, "Short write:%d out of %d\n", res,
+ RAS_TABLE_HEADER_SIZE);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+static void
+__encode_table_ras_info_to_buf(struct amdgpu_ras_eeprom_table_ras_info *rai,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+ u32 tmp;
+
+ tmp = ((uint32_t)(rai->rma_status) & 0xFF) |
+ (((uint32_t)(rai->health_percent) << 8) & 0xFF00) |
+ (((uint32_t)(rai->ecc_page_threshold) << 16) & 0xFFFF0000);
+ pp[0] = cpu_to_le32(tmp);
+}
+
+static void
+__decode_table_ras_info_from_buf(struct amdgpu_ras_eeprom_table_ras_info *rai,
+ unsigned char *buf)
+{
+ u32 *pp = (uint32_t *)buf;
+ u32 tmp;
+
+ tmp = le32_to_cpu(pp[0]);
+ rai->rma_status = tmp & 0xFF;
+ rai->health_percent = (tmp >> 8) & 0xFF;
+ rai->ecc_page_threshold = (tmp >> 16) & 0xFFFF;
+}
+
+static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ u8 *buf;
+ int res;
+
+ buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
+ if (!buf) {
+ dev_err(adev->dev,
+ "Failed to alloc buf to write table ras info\n");
+ return -ENOMEM;
+ }
+
+ __encode_table_ras_info_to_buf(&control->tbl_rai, buf);
+
+ /* i2c may be unstable in gpu reset */
+ down_read(&adev->reset_domain->sem);
+ res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus,
+ control->i2c_address +
+ control->ras_info_offset,
+ buf, RAS_TABLE_V2_1_INFO_SIZE);
+ up_read(&adev->reset_domain->sem);
+
+ if (res < 0) {
+ dev_err(adev->dev, "Failed to write EEPROM table ras info:%d",
+ res);
+ } else if (res < RAS_TABLE_V2_1_INFO_SIZE) {
+ dev_err(adev->dev, "Short write:%d out of %d\n", res,
+ RAS_TABLE_V2_1_INFO_SIZE);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ kfree(buf);
+
+ return res;
+}
+
+static u8 __calc_hdr_byte_sum(const struct amdgpu_ras_eeprom_control *control)
+{
+ int ii;
+ u8 *pp, csum;
+ size_t sz;
+
+ /* Header checksum, skip checksum field in the calculation */
+ sz = sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum);
+ pp = (u8 *) &control->tbl_hdr;
+ csum = 0;
+ for (ii = 0; ii < sz; ii++, pp++)
+ csum += *pp;
+
+ return csum;
+}
+
+static u8 __calc_ras_info_byte_sum(const struct amdgpu_ras_eeprom_control *control)
+{
+ int ii;
+ u8 *pp, csum;
+ size_t sz;
+
+ sz = sizeof(control->tbl_rai);
+ pp = (u8 *) &control->tbl_rai;
+ csum = 0;
+ for (ii = 0; ii < sz; ii++, pp++)
+ csum += *pp;
+
+ return csum;
+}
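+
+/* The checksum byte is chosen so that the byte sum of the entire stored
+ * table (header, optional ras info area, all records and the checksum
+ * byte itself) is 0 modulo 256; see __verify_ras_table_checksum().
+ */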
+
+static int amdgpu_ras_eeprom_correct_header_tag(
+ struct amdgpu_ras_eeprom_control *control,
+ uint32_t header)
+{
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ u8 *hh;
+ int res;
+ u8 csum;
+
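+ /* All table bytes, including the checksum, sum to zero mod 256, so
+ * the new checksum can be derived from the old one by removing the
+ * old header tag bytes and adding the new ones, without re-reading
+ * the stored records.
+ */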
+ csum = -hdr->checksum;
+
+ hh = (void *) &hdr->header;
+ csum -= (hh[0] + hh[1] + hh[2] + hh[3]);
+ hh = (void *) &header;
+ csum += hh[0] + hh[1] + hh[2] + hh[3];
+ csum = -csum;
+ mutex_lock(&control->ras_tbl_mutex);
+ hdr->header = header;
+ hdr->checksum = csum;
+ res = __write_table_header(control);
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+static void amdgpu_ras_set_eeprom_table_version(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+
+ switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
+ case IP_VERSION(8, 10, 0):
+ hdr->version = RAS_TABLE_VER_V2_1;
+ return;
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 5, 0):
+ hdr->version = RAS_TABLE_VER_V3;
+ return;
+ default:
+ hdr->version = RAS_TABLE_VER_V1;
+ return;
+ }
+}
+
+/**
+ * amdgpu_ras_eeprom_reset_table -- Reset the RAS EEPROM table
+ * @control: pointer to control structure
+ *
+ * Reset the contents of the header of the RAS EEPROM table.
+ * Return 0 on success, -errno on error.
+ */
+int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct amdgpu_ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ u8 csum;
+ int res;
+
+ mutex_lock(&control->ras_tbl_mutex);
+
+ hdr->header = RAS_TABLE_HDR_VAL;
+ amdgpu_ras_set_eeprom_table_version(control);
+
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ hdr->first_rec_offset = RAS_RECORD_START_V2_1;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE;
+ rai->rma_status = GPU_HEALTH_USABLE;
+ /**
+ * GPU health represented as a percentage.
+ * 0 means worst health, 100 means full health.
+ */
+ rai->health_percent = 100;
+ /* ecc_page_threshold = 0 means disable bad page retirement */
+ rai->ecc_page_threshold = con->bad_page_cnt_threshold;
+ } else {
+ hdr->first_rec_offset = RAS_RECORD_START;
+ hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
+ }
+
+ csum = __calc_hdr_byte_sum(control);
+ if (hdr->version >= RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
+ csum = -csum;
+ hdr->checksum = csum;
+ res = __write_table_header(control);
+ if (!res && hdr->version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
+
+ control->ras_num_recs = 0;
+ control->ras_num_bad_pages = 0;
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
+ control->ras_fri = 0;
+
+ amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_bad_pages);
+
+ control->bad_channel_bitmap = 0;
+ amdgpu_dpm_send_hbm_bad_channel_flag(adev, control->bad_channel_bitmap);
+ con->update_channel_flag = false;
+
+ amdgpu_ras_debugfs_set_ret_size(control);
+
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+static void
+__encode_table_record_to_buf(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *record,
+ unsigned char *buf)
+{
+ __le64 tmp = 0;
+ int i = 0;
+
+ /* Next are all record fields according to EEPROM page spec in LE format */
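+ /* Byte layout (RAS_TABLE_RECORD_SIZE == 24 bytes):
+ * [0] err_type, [1] bank, [2..9] timestamp,
+ * [10..15] offset (48 bit), [16] mem_channel, [17] mcumc_id,
+ * [18..23] retired_page (48 bit)
+ */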
+ buf[i++] = record->err_type;
+
+ buf[i++] = record->bank;
+
+ tmp = cpu_to_le64(record->ts);
+ memcpy(buf + i, &tmp, 8);
+ i += 8;
+
+ tmp = cpu_to_le64((record->offset & 0xffffffffffff));
+ memcpy(buf + i, &tmp, 6);
+ i += 6;
+
+ buf[i++] = record->mem_channel;
+ buf[i++] = record->mcumc_id;
+
+ tmp = cpu_to_le64((record->retired_page & 0xffffffffffff));
+ memcpy(buf + i, &tmp, 6);
+}
+
+static void
+__decode_table_record_from_buf(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *record,
+ unsigned char *buf)
+{
+ __le64 tmp = 0;
+ int i = 0;
+
+ /* Next are all record fields according to EEPROM page spec in LE format */
+ record->err_type = buf[i++];
+
+ record->bank = buf[i++];
+
+ memcpy(&tmp, buf + i, 8);
+ record->ts = le64_to_cpu(tmp);
+ i += 8;
+
+ memcpy(&tmp, buf + i, 6);
+ record->offset = (le64_to_cpu(tmp) & 0xffffffffffff);
+ i += 6;
+
+ record->mem_channel = buf[i++];
+ record->mcumc_id = buf[i++];
+
+ memcpy(&tmp, buf + i, 6);
+ record->retired_page = (le64_to_cpu(tmp) & 0xffffffffffff);
+}
+
+bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!__is_ras_eeprom_supported(adev) ||
+ !amdgpu_bad_page_threshold)
+ return false;
+
+ /* skip checking the eeprom table for VEGA20 Gaming */
+ if (!con)
+ return false;
+ else
+ if (!(con->features & BIT(AMDGPU_RAS_BLOCK__UMC)))
+ return false;
+
+ if (con->eeprom_control.tbl_hdr.header == RAS_TABLE_HDR_BAD) {
+ if (con->eeprom_control.ras_num_bad_pages > con->bad_page_cnt_threshold)
+ dev_warn(adev->dev, "RAS records:%d exceed threshold:%d",
+ con->eeprom_control.ras_num_bad_pages, con->bad_page_cnt_threshold);
+ if ((amdgpu_bad_page_threshold == -1) ||
+ (amdgpu_bad_page_threshold == -2)) {
+ dev_warn(adev->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures.\n");
+ return false;
+ } else {
+ dev_warn(adev->dev,
+ "Please consider adjusting the customized threshold.\n");
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * __amdgpu_ras_eeprom_write -- write indexed from buffer to EEPROM
+ * @control: pointer to control structure
+ * @buf: pointer to buffer containing data to write
+ * @fri: start writing at this index
+ * @num: number of records to write
+ *
+ * The caller must hold the table mutex in @control.
+ * Return 0 on success, -errno otherwise.
+ */
+static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control,
+ u8 *buf, const u32 fri, const u32 num)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ u32 buf_size;
+ int res;
+
+ /* i2c may be unstable in gpu reset */
+ down_read(&adev->reset_domain->sem);
+ buf_size = num * RAS_TABLE_RECORD_SIZE;
+ res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus,
+ control->i2c_address +
+ RAS_INDEX_TO_OFFSET(control, fri),
+ buf, buf_size);
+ up_read(&adev->reset_domain->sem);
+ if (res < 0) {
+ dev_err(adev->dev, "Writing %d EEPROM table records error:%d",
+ num, res);
+ } else if (res < buf_size) {
+ /* Short write, return error.
+ */
+ dev_err(adev->dev, "Wrote %d records out of %d",
+ res / RAS_TABLE_RECORD_SIZE, num);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+static int
+amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *record,
+ const u32 num)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(to_amdgpu_device(control));
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ u32 a, b, i;
+ u8 *buf, *pp;
+ int res;
+
+ buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Encode all of them in one go.
+ */
+ pp = buf;
+ for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) {
+ __encode_table_record_to_buf(control, &record[i], pp);
+
+ /* update bad channel bitmap */
+ if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) &&
+ !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) {
+ control->bad_channel_bitmap |= 1 << record[i].mem_channel;
+ con->update_channel_flag = true;
+ }
+ }
+
+ /* a, first record index to write into.
+ * b, last record index to write into.
+ * a = first index to read (fri) + number of records in the table,
+ * b = a + @num - 1.
+ * Let N = control->ras_max_num_record_count, then we have,
+ * case 0: 0 <= a <= b < N,
+ * just append @num records starting at a;
+ * case 1: 0 <= a < N <= b,
+ * append (N - a) records starting at a, and
+ * append the remainder, b % N + 1, starting at 0.
+ * case 2: 0 <= fri < N <= a <= b, then modulo N we get two subcases,
+ * case 2a: 0 <= a <= b < N
+ * append num records starting at a; and fix fri if b overwrote it,
+ * and since a <= b, if b overwrote it then a must've also,
+ * and if b didn't overwrite it, then a didn't also.
+ * case 2b: 0 <= b < a < N
+ * write num records starting at a, which wraps around 0=N
+ * and overwrite fri unconditionally. Now from case 2a,
+ * this means that b eclipsed fri to overwrite it and wrap
+ * around 0 again, i.e. b = 2N+r pre modulo N, so we unconditionally
+ * set fri = b + 1 (mod N).
+ * Now, since fri is updated in every case, except the trivial case 0,
+ * the number of records present in the table after writing, is,
+ * num_recs - 1 = b - fri (mod N), and we take the positive value,
+ * by adding an arbitrary multiple of N before taking the modulo N
+ * as shown below.
+ */
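+ /* Worked example: with N = 10, ras_fri = 7, ras_num_recs = 8 and
+ * num = 4: a = 15, b = 18, so case 2a above applies; modulo N,
+ * a = 5 and b = 8, the four records are written at indices 5..8,
+ * fri becomes 9, and ras_num_recs becomes 1 + (10 + 8 - 9) % 10 = 10,
+ * i.e. the table is full.
+ */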
+ a = control->ras_fri + control->ras_num_recs;
+ b = a + num - 1;
+ if (b < control->ras_max_record_count) {
+ res = __amdgpu_ras_eeprom_write(control, buf, a, num);
+ } else if (a < control->ras_max_record_count) {
+ u32 g0, g1;
+
+ g0 = control->ras_max_record_count - a;
+ g1 = b % control->ras_max_record_count + 1;
+ res = __amdgpu_ras_eeprom_write(control, buf, a, g0);
+ if (res)
+ goto Out;
+ res = __amdgpu_ras_eeprom_write(control,
+ buf + g0 * RAS_TABLE_RECORD_SIZE,
+ 0, g1);
+ if (res)
+ goto Out;
+ if (g1 > control->ras_fri)
+ control->ras_fri = g1 % control->ras_max_record_count;
+ } else {
+ a %= control->ras_max_record_count;
+ b %= control->ras_max_record_count;
+
+ if (a <= b) {
+ /* Note that, b - a + 1 = num. */
+ res = __amdgpu_ras_eeprom_write(control, buf, a, num);
+ if (res)
+ goto Out;
+ if (b >= control->ras_fri)
+ control->ras_fri = (b + 1) % control->ras_max_record_count;
+ } else {
+ u32 g0, g1;
+
+ /* b < a, which means, we write from
+ * a to the end of the table, and from
+ * the start of the table to b.
+ */
+ g0 = control->ras_max_record_count - a;
+ g1 = b + 1;
+ res = __amdgpu_ras_eeprom_write(control, buf, a, g0);
+ if (res)
+ goto Out;
+ res = __amdgpu_ras_eeprom_write(control,
+ buf + g0 * RAS_TABLE_RECORD_SIZE,
+ 0, g1);
+ if (res)
+ goto Out;
+ control->ras_fri = g1 % control->ras_max_record_count;
+ }
+ }
+ control->ras_num_recs = 1 + (control->ras_max_record_count + b
+ - control->ras_fri)
+ % control->ras_max_record_count;
+
+ /* older ASICs only save physical addresses (PA) to eeprom, as before */
+ if (IP_VERSION_MAJ(amdgpu_ip_version(adev, UMC_HWIP, 0)) < 12)
+ control->ras_num_pa_recs += num;
+ else
+ control->ras_num_mca_recs += num;
+
+ control->ras_num_bad_pages = con->bad_page_num;
+Out:
+ kfree(buf);
+ return res;
+}
+
+static int
+amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ u8 *buf, *pp, csum;
+ u32 buf_size;
+ int res;
+
+ /* Modify the header if the saved bad page count exceeds the threshold.
+ */
+ if (amdgpu_bad_page_threshold != 0 &&
+ control->ras_num_bad_pages > ras->bad_page_cnt_threshold) {
+ dev_warn(adev->dev,
+ "Saved bad pages %d reaches threshold value %d\n",
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
+
+ if (adev->cper.enabled && amdgpu_cper_generate_bp_threshold_record(adev))
+ dev_warn(adev->dev, "fail to generate bad page threshold cper records\n");
+
+ if ((amdgpu_bad_page_threshold != -1) &&
+ (amdgpu_bad_page_threshold != -2)) {
+ control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) {
+ control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD;
+ control->tbl_rai.health_percent = 0;
+ }
+ ras->is_rma = true;
+ }
+
+ /* ignore the -ENOTSUPP return value */
+ amdgpu_dpm_send_rma_reason(adev);
+ }
+
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ else
+ control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ control->tbl_hdr.checksum = 0;
+
+ buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+ if (!buf) {
+ dev_err(adev->dev,
+ "allocating memory for table of size %d bytes failed\n",
+ control->tbl_hdr.tbl_size);
+ res = -ENOMEM;
+ goto Out;
+ }
+
+ down_read(&adev->reset_domain->sem);
+ res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
+ control->i2c_address +
+ control->ras_record_offset,
+ buf, buf_size);
+ up_read(&adev->reset_domain->sem);
+ if (res < 0) {
+ dev_err(adev->dev, "EEPROM failed reading records:%d\n", res);
+ goto Out;
+ } else if (res < buf_size) {
+ dev_err(adev->dev, "EEPROM read %d out of %d bytes\n", res,
+ buf_size);
+ res = -EIO;
+ goto Out;
+ }
+
+ /**
+ * bad page records have been stored in eeprom,
+ * now calculate gpu health percent
+ */
+ if (amdgpu_bad_page_threshold != 0 &&
+ control->tbl_hdr.version >= RAS_TABLE_VER_V2_1 &&
+ control->ras_num_bad_pages <= ras->bad_page_cnt_threshold)
+ control->tbl_rai.health_percent = ((ras->bad_page_cnt_threshold -
+ control->ras_num_bad_pages) * 100) /
+ ras->bad_page_cnt_threshold;
+
+ /* Recalc the checksum.
+ */
+ csum = 0;
+ for (pp = buf; pp < buf + buf_size; pp++)
+ csum += *pp;
+
+ csum += __calc_hdr_byte_sum(control);
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ csum += __calc_ras_info_byte_sum(control);
+ /* avoid sign extension when assigning to "checksum" */
+ csum = -csum;
+ control->tbl_hdr.checksum = csum;
+ res = __write_table_header(control);
+ if (!res && control->tbl_hdr.version > RAS_TABLE_VER_V1)
+ res = __write_table_ras_info(control);
+Out:
+ kfree(buf);
+ return res;
+}
+
+/**
+ * amdgpu_ras_eeprom_append -- append records to the EEPROM RAS table
+ * @control: pointer to control structure
+ * @record: array of records to append
+ * @num: number of records in @record array
+ *
+ * Append @num records to the table, calculate the checksum and write
+ * the table back to EEPROM. The maximum number of records that
+ * can be appended is between 1 and control->ras_max_record_count,
+ * regardless of how many records are already stored in the table.
+ *
+ * Return 0 on success or if EEPROM is not supported, -errno on error.
+ */
+int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *record,
+ const u32 num)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ int res, i;
+ uint64_t nps = AMDGPU_NPS1_PARTITION_MODE;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+
+ if (num == 0) {
+ dev_err(adev->dev, "will not append 0 records\n");
+ return -EINVAL;
+ } else if (num > control->ras_max_record_count) {
+ dev_err(adev->dev,
+ "cannot append %d records than the size of table %d\n",
+ num, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ nps = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+
+ /* set the new channel index flag */
+ for (i = 0; i < num; i++)
+ record[i].retired_page |= (nps << UMC_NPS_SHIFT);
+
+ mutex_lock(&control->ras_tbl_mutex);
+
+ res = amdgpu_ras_eeprom_append_table(control, record, num);
+ if (!res)
+ res = amdgpu_ras_eeprom_update_header(control);
+ if (!res)
+ amdgpu_ras_debugfs_set_ret_size(control);
+
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ /* clear channel index flag, the flag is only saved on eeprom */
+ for (i = 0; i < num; i++)
+ record[i].retired_page &= ~(nps << UMC_NPS_SHIFT);
+
+ return res;
+}
+
+/**
+ * __amdgpu_ras_eeprom_read -- read indexed from EEPROM into buffer
+ * @control: pointer to control structure
+ * @buf: pointer to buffer to read into
+ * @fri: first record index, start reading at this index, absolute index
+ * @num: number of records to read
+ *
+ * The caller must hold the table mutex in @control.
+ * Return 0 on success, -errno otherwise.
+ */
+static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
+ u8 *buf, const u32 fri, const u32 num)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ u32 buf_size;
+ int res;
+
+ /* i2c may be unstable in gpu reset */
+ down_read(&adev->reset_domain->sem);
+ buf_size = num * RAS_TABLE_RECORD_SIZE;
+ res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
+ control->i2c_address +
+ RAS_INDEX_TO_OFFSET(control, fri),
+ buf, buf_size);
+ up_read(&adev->reset_domain->sem);
+ if (res < 0) {
+ dev_err(adev->dev, "Reading %d EEPROM table records error:%d",
+ num, res);
+ } else if (res < buf_size) {
+ /* Short read, return error.
+ */
+ dev_err(adev->dev, "Read %d records out of %d",
+ res / RAS_TABLE_RECORD_SIZE, num);
+ res = -EIO;
+ } else {
+ res = 0;
+ }
+
+ return res;
+}
+
+/**
+ * amdgpu_ras_eeprom_read -- read EEPROM
+ * @control: pointer to control structure
+ * @record: array of records to read into
+ * @num: number of records in @record
+ *
+ * Reads num records from the RAS table in EEPROM and
+ * writes the data into @record array.
+ *
+ * Returns 0 on success, -errno on error.
+ */
+int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *record,
+ const u32 num)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int i, res;
+ u8 *buf, *pp;
+ u32 g0, g1;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+
+ if (num == 0) {
+ dev_err(adev->dev, "will not read 0 records\n");
+ return -EINVAL;
+ } else if (num > control->ras_num_recs) {
+ dev_err(adev->dev, "too many records to read:%d available:%d\n",
+ num, control->ras_num_recs);
+ return -EINVAL;
+ }
+
+ buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Determine how many records to read, from the first record
+ * index, fri, to the end of the table, and from the beginning
+ * of the table, such that the total number of records is
+ * @num, and we handle wrap around when fri > 0 and
+ * fri + num > RAS_MAX_RECORD_COUNT.
+ *
+ * First we compute the index of the last element
+ * which would be fetched from each region,
+ * g0 is in [fri, fri + num - 1], and
+ * g1 is in [0, RAS_MAX_RECORD_COUNT - 1].
+ * Then, if g0 < RAS_MAX_RECORD_COUNT, the index of
+ * the last element to fetch, we set g0 to _the number_
+ * of elements to fetch, @num, since we know that the last
+ * indexed to be fetched does not exceed the table.
+ *
+ * If, however, g0 >= RAS_MAX_RECORD_COUNT, then
+ * we set g0 to the number of elements to read
+ * until the end of the table, and g1 to the number of
+ * elements to read from the beginning of the table.
+ */
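+ /* For example, with ras_fri = 8, ras_max_record_count = 10 and
+ * num = 5: g0 = 12 >= 10, so g0 becomes 10 - 8 = 2 and g1 becomes
+ * 12 % 10 + 1 = 3, i.e. 2 records are read at indices 8..9 and 3
+ * more at indices 0..2.
+ */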
+ g0 = control->ras_fri + num - 1;
+ g1 = g0 % control->ras_max_record_count;
+ if (g0 < control->ras_max_record_count) {
+ g0 = num;
+ g1 = 0;
+ } else {
+ g0 = control->ras_max_record_count - control->ras_fri;
+ g1 += 1;
+ }
+
+ mutex_lock(&control->ras_tbl_mutex);
+ res = __amdgpu_ras_eeprom_read(control, buf, control->ras_fri, g0);
+ if (res)
+ goto Out;
+ if (g1) {
+ res = __amdgpu_ras_eeprom_read(control,
+ buf + g0 * RAS_TABLE_RECORD_SIZE,
+ 0, g1);
+ if (res)
+ goto Out;
+ }
+
+ res = 0;
+
+ /* All requested records have been read; now decode them into @record.
+ */
+ pp = buf;
+ for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE) {
+ __decode_table_record_from_buf(control, &record[i], pp);
+
+ /* update bad channel bitmap */
+ if ((record[i].mem_channel < BITS_PER_TYPE(control->bad_channel_bitmap)) &&
+ !(control->bad_channel_bitmap & (1 << record[i].mem_channel))) {
+ control->bad_channel_bitmap |= 1 << record[i].mem_channel;
+ con->update_channel_flag = true;
+ }
+ }
+Out:
+ kfree(buf);
+ mutex_unlock(&control->ras_tbl_mutex);
+
+ return res;
+}
+
+uint32_t amdgpu_ras_eeprom_max_record_count(struct amdgpu_ras_eeprom_control *control)
+{
+ /* get available eeprom table version first before eeprom table init */
+ amdgpu_ras_set_eeprom_table_version(control);
+
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ return RAS_MAX_RECORD_COUNT_V2_1;
+ else
+ return RAS_MAX_RECORD_COUNT;
+}
+
+static ssize_t
+amdgpu_ras_debugfs_eeprom_size_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control = ras ? &ras->eeprom_control : NULL;
+ u8 data[50];
+ int res;
+
+ if (!size)
+ return size;
+
+ if (!ras || !control) {
+ res = snprintf(data, sizeof(data), "Not supported\n");
+ } else {
+ res = snprintf(data, sizeof(data), "%d bytes or %d records\n",
+ RAS_TBL_SIZE_BYTES, control->ras_max_record_count);
+ }
+
+ if (*pos >= res)
+ return 0;
+
+ res -= *pos;
+ res = min_t(size_t, res, size);
+
+ if (copy_to_user(buf, &data[*pos], res))
+ return -EFAULT;
+
+ *pos += res;
+
+ return res;
+}
+
+const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_ras_debugfs_eeprom_size_read,
+ .write = NULL,
+ .llseek = default_llseek,
+};
+
+static const char *tbl_hdr_str = " Signature Version FirstOffs Size Checksum\n";
+static const char *tbl_hdr_fmt = "0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n";
+#define tbl_hdr_fmt_size (5 * (2+8) + 4 + 1)
+static const char *rec_hdr_str = "Index Offset ErrType Bank/CU TimeStamp Offs/Addr MemChl MCUMCID RetiredPage\n";
+static const char *rec_hdr_fmt = "%5d 0x%05X %7s 0x%02X 0x%016llX 0x%012llX 0x%02X 0x%02X 0x%012llX\n";
+#define rec_hdr_fmt_size (5 + 1 + 7 + 1 + 7 + 1 + 7 + 1 + 18 + 1 + 14 + 1 + 6 + 1 + 7 + 1 + 14 + 1)
+
+static const char *record_err_type_str[AMDGPU_RAS_EEPROM_ERR_COUNT] = {
+ "ignore",
+ "re",
+ "ue",
+};
+
+static loff_t amdgpu_ras_debugfs_table_size(struct amdgpu_ras_eeprom_control *control)
+{
+ return strlen(tbl_hdr_str) + tbl_hdr_fmt_size +
+ strlen(rec_hdr_str) + rec_hdr_fmt_size * control->ras_num_recs;
+}
+
+void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_ras *ras = container_of(control, struct amdgpu_ras,
+ eeprom_control);
+ struct dentry *de = ras->de_ras_eeprom_table;
+
+ if (de)
+ d_inode(de)->i_size = amdgpu_ras_debugfs_table_size(control);
+}
+
+static ssize_t amdgpu_ras_debugfs_table_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control = &ras->eeprom_control;
+ const size_t orig_size = size;
+ int res = -EFAULT;
+ size_t data_len;
+
+ mutex_lock(&control->ras_tbl_mutex);
+
+ /* Each section below is copied out while *pos < data_len, i.e. while
+ * there are still bytes of that section left to return to the user.
+ */
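+ /* The virtual file layout, in order, is: tbl_hdr_str, one header line
+ * of tbl_hdr_fmt_size bytes, rec_hdr_str, and one line of
+ * rec_hdr_fmt_size bytes per record (see
+ * amdgpu_ras_debugfs_table_size()); *pos indexes into this layout.
+ */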
+ data_len = strlen(tbl_hdr_str);
+ if (*pos < data_len) {
+ data_len -= *pos;
+ data_len = min_t(size_t, data_len, size);
+ if (copy_to_user(buf, &tbl_hdr_str[*pos], data_len))
+ goto Out;
+ buf += data_len;
+ size -= data_len;
+ *pos += data_len;
+ }
+
+ data_len = strlen(tbl_hdr_str) + tbl_hdr_fmt_size;
+ if (*pos < data_len && size > 0) {
+ u8 data[tbl_hdr_fmt_size + 1];
+ loff_t lpos;
+
+ snprintf(data, sizeof(data), tbl_hdr_fmt,
+ control->tbl_hdr.header,
+ control->tbl_hdr.version,
+ control->tbl_hdr.first_rec_offset,
+ control->tbl_hdr.tbl_size,
+ control->tbl_hdr.checksum);
+
+ data_len -= *pos;
+ data_len = min_t(size_t, data_len, size);
+ lpos = *pos - strlen(tbl_hdr_str);
+ if (copy_to_user(buf, &data[lpos], data_len))
+ goto Out;
+ buf += data_len;
+ size -= data_len;
+ *pos += data_len;
+ }
+
+ data_len = strlen(tbl_hdr_str) + tbl_hdr_fmt_size + strlen(rec_hdr_str);
+ if (*pos < data_len && size > 0) {
+ loff_t lpos;
+
+ data_len -= *pos;
+ data_len = min_t(size_t, data_len, size);
+ lpos = *pos - strlen(tbl_hdr_str) - tbl_hdr_fmt_size;
+ if (copy_to_user(buf, &rec_hdr_str[lpos], data_len))
+ goto Out;
+ buf += data_len;
+ size -= data_len;
+ *pos += data_len;
+ }
+
+ data_len = amdgpu_ras_debugfs_table_size(control);
+ if (*pos < data_len && size > 0) {
+ u8 dare[RAS_TABLE_RECORD_SIZE];
+ u8 data[rec_hdr_fmt_size + 1];
+ struct eeprom_table_record record;
+ int s, r;
+
+ /* Find the starting record index
+ */
+ s = *pos - strlen(tbl_hdr_str) - tbl_hdr_fmt_size -
+ strlen(rec_hdr_str);
+ s = s / rec_hdr_fmt_size;
+ r = *pos - strlen(tbl_hdr_str) - tbl_hdr_fmt_size -
+ strlen(rec_hdr_str);
+ r = r % rec_hdr_fmt_size;
+
+ for ( ; size > 0 && s < control->ras_num_recs; s++) {
+ u32 ai = RAS_RI_TO_AI(control, s);
+ /* Read a single record
+ */
+ res = __amdgpu_ras_eeprom_read(control, dare, ai, 1);
+ if (res)
+ goto Out;
+ __decode_table_record_from_buf(control, &record, dare);
+ snprintf(data, sizeof(data), rec_hdr_fmt,
+ s,
+ RAS_INDEX_TO_OFFSET(control, ai),
+ record_err_type_str[record.err_type],
+ record.bank,
+ record.ts,
+ record.offset,
+ record.mem_channel,
+ record.mcumc_id,
+ record.retired_page);
+
+ data_len = min_t(size_t, rec_hdr_fmt_size - r, size);
+ if (copy_to_user(buf, &data[r], data_len)) {
+ res = -EFAULT;
+ goto Out;
+ }
+ buf += data_len;
+ size -= data_len;
+ *pos += data_len;
+ r = 0;
+ }
+ }
+ res = 0;
+Out:
+ mutex_unlock(&control->ras_tbl_mutex);
+ return res < 0 ? res : orig_size - size;
+}
+
+static ssize_t
+amdgpu_ras_debugfs_eeprom_table_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control = ras ? &ras->eeprom_control : NULL;
+ u8 data[81];
+ int res;
+
+ if (!size)
+ return size;
+
+ if (!ras || !control) {
+ res = snprintf(data, sizeof(data), "Not supported\n");
+ if (*pos >= res)
+ return 0;
+
+ res -= *pos;
+ res = min_t(size_t, res, size);
+
+ if (copy_to_user(buf, &data[*pos], res))
+ return -EFAULT;
+
+ *pos += res;
+
+ return res;
+ } else {
+ return amdgpu_ras_debugfs_table_read(f, buf, size, pos);
+ }
+}
+
+const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_ras_debugfs_eeprom_table_read,
+ .write = NULL,
+ .llseek = default_llseek,
+};
+
+/**
+ * __verify_ras_table_checksum -- verify the RAS EEPROM table checksum
+ * @control: pointer to control structure
+ *
+ * Check the checksum of the stored in EEPROM RAS table.
+ *
+ * Return 0 if the checksum is correct,
+ * positive if it is not correct, and
+ * -errno on I/O error.
+ */
+static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ int buf_size, res;
+ u8 csum, *buf, *pp;
+
+ if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1)
+ buf_size = RAS_TABLE_HEADER_SIZE +
+ RAS_TABLE_V2_1_INFO_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+ else
+ buf_size = RAS_TABLE_HEADER_SIZE +
+ control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ dev_err(adev->dev,
+ "Out of memory checking RAS table checksum.\n");
+ return -ENOMEM;
+ }
+
+ res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
+ control->i2c_address +
+ control->ras_header_offset,
+ buf, buf_size);
+ if (res < buf_size) {
+ dev_err(adev->dev, "Partial read for checksum, res:%d\n", res);
+ /* On partial reads, return -EIO.
+ */
+ if (res >= 0)
+ res = -EIO;
+ goto Out;
+ }
+
+ csum = 0;
+ for (pp = buf; pp < buf + buf_size; pp++)
+ csum += *pp;
+Out:
+ kfree(buf);
+ return res < 0 ? res : csum;
+}
+
+static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_ras_eeprom_table_ras_info *rai = &control->tbl_rai;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ unsigned char *buf;
+ int res;
+
+ buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL);
+ if (!buf) {
+ dev_err(adev->dev,
+ "Failed to alloc buf to read EEPROM table ras info\n");
+ return -ENOMEM;
+ }
+
+ /**
+ * EEPROM table V2_1 supports ras info,
+ * read EEPROM table ras info
+ */
+ res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
+ control->i2c_address + control->ras_info_offset,
+ buf, RAS_TABLE_V2_1_INFO_SIZE);
+ if (res < RAS_TABLE_V2_1_INFO_SIZE) {
+ dev_err(adev->dev,
+ "Failed to read EEPROM table ras info, res:%d", res);
+ res = res >= 0 ? -EIO : res;
+ goto Out;
+ }
+
+ __decode_table_ras_info_from_buf(rai, buf);
+
+Out:
+ kfree(buf);
+ return res == RAS_TABLE_V2_1_INFO_SIZE ? 0 : res;
+}
+
+int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ unsigned char buf[RAS_TABLE_HEADER_SIZE] = { 0 };
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int res;
+
+ ras->is_rma = false;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+
+ /* Verify i2c adapter is initialized */
+ if (!adev->pm.ras_eeprom_i2c_bus || !adev->pm.ras_eeprom_i2c_bus->algo)
+ return -ENOENT;
+
+ if (!__get_eeprom_i2c_addr(adev, control))
+ return -EINVAL;
+
+ control->ras_header_offset = RAS_HDR_START;
+ control->ras_info_offset = RAS_TABLE_V2_1_INFO_START;
+ mutex_init(&control->ras_tbl_mutex);
+
+ /* Read the table header from EEPROM address */
+ res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus,
+ control->i2c_address + control->ras_header_offset,
+ buf, RAS_TABLE_HEADER_SIZE);
+ if (res < RAS_TABLE_HEADER_SIZE) {
+ dev_err(adev->dev, "Failed to read EEPROM table header, res:%d",
+ res);
+ return res >= 0 ? -EIO : res;
+ }
+
+ __decode_table_header_from_buf(hdr, buf);
+
+ if (hdr->header != RAS_TABLE_HDR_VAL &&
+ hdr->header != RAS_TABLE_HDR_BAD) {
+ dev_info(adev->dev, "Creating a new EEPROM table");
+ return amdgpu_ras_eeprom_reset_table(control);
+ }
+
+ switch (hdr->version) {
+ case RAS_TABLE_VER_V2_1:
+ case RAS_TABLE_VER_V3:
+ control->ras_num_recs = RAS_NUM_RECS_V2_1(hdr);
+ control->ras_record_offset = RAS_RECORD_START_V2_1;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT_V2_1;
+ break;
+ case RAS_TABLE_VER_V1:
+ control->ras_num_recs = RAS_NUM_RECS(hdr);
+ control->ras_record_offset = RAS_RECORD_START;
+ control->ras_max_record_count = RAS_MAX_RECORD_COUNT;
+ break;
+ default:
+ dev_err(adev->dev,
+ "RAS header invalid, unsupported version: %u",
+ hdr->version);
+ return -EINVAL;
+ }
+
+ if (control->ras_num_recs > control->ras_max_record_count) {
+ dev_err(adev->dev,
+ "RAS header invalid, records in header: %u max allowed :%u",
+ control->ras_num_recs, control->ras_max_record_count);
+ return -EINVAL;
+ }
+
+ control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
+ control->ras_num_mca_recs = 0;
+ control->ras_num_pa_recs = 0;
+ return 0;
+}
+
+int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control)
+{
+ struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ int res = 0;
+
+ if (!__is_ras_eeprom_supported(adev))
+ return 0;
+
+ /* Verify i2c adapter is initialized */
+ if (!adev->pm.ras_eeprom_i2c_bus || !adev->pm.ras_eeprom_i2c_bus->algo)
+ return -ENOENT;
+
+ if (!__get_eeprom_i2c_addr(adev, control))
+ return -EINVAL;
+
+ control->ras_num_bad_pages = ras->bad_page_num;
+
+ if (hdr->header == RAS_TABLE_HDR_VAL) {
+ dev_dbg(adev->dev,
+ "Found existing EEPROM table with %d records",
+ control->ras_num_bad_pages);
+
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ res = __read_table_ras_info(control);
+ if (res)
+ return res;
+ }
+
+ res = __verify_ras_table_checksum(control);
+ if (res)
+ dev_err(adev->dev,
+ "RAS table incorrect checksum or error:%d\n",
+ res);
+
+ /* Warn if we are at 90% of the threshold or above
+ */
+ if (10 * control->ras_num_bad_pages >= 9 * ras->bad_page_cnt_threshold)
+ dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d",
+ control->ras_num_bad_pages,
+ ras->bad_page_cnt_threshold);
+ } else if (hdr->header == RAS_TABLE_HDR_BAD &&
+ amdgpu_bad_page_threshold != 0) {
+ if (hdr->version >= RAS_TABLE_VER_V2_1) {
+ res = __read_table_ras_info(control);
+ if (res)
+ return res;
+ }
+
+ res = __verify_ras_table_checksum(control);
+ if (res) {
+ dev_err(adev->dev,
+ "RAS Table incorrect checksum or error:%d\n",
+ res);
+ return -EINVAL;
+ }
+ if (ras->bad_page_cnt_threshold >= control->ras_num_bad_pages) {
+ /* The threshold was increased since the last time the
+ * system was booted, and now
+ * ras->bad_page_cnt_threshold >= control->ras_num_bad_pages,
+ * so more records can be saved before the page count
+ * threshold is reached; restore the good header signature.
+ */
+ dev_info(adev->dev,
+ "records:%d threshold:%d, resetting "
+ "RAS table header signature",
+ control->ras_num_bad_pages,
+ ras->bad_page_cnt_threshold);
+ res = amdgpu_ras_eeprom_correct_header_tag(control,
+ RAS_TABLE_HDR_VAL);
+ } else {
+ dev_warn(adev->dev,
+ "RAS records:%d exceed threshold:%d\n",
+ control->ras_num_bad_pages, ras->bad_page_cnt_threshold);
+ if ((amdgpu_bad_page_threshold == -1) ||
+ (amdgpu_bad_page_threshold == -2)) {
+ res = 0;
+ dev_warn(adev->dev,
+ "Please consult AMD Service Action Guide (SAG) for appropriate service procedures\n");
+ } else {
+ ras->is_rma = true;
+ dev_warn(adev->dev,
+ "User defined threshold is set, runtime service will be halt when threshold is reached\n");
+ }
+ }
+ }
+
+ return res < 0 ? res : 0;
+}
+
+void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ struct amdgpu_ras_eeprom_control *control;
+ int res;
+
+ if (!__is_ras_eeprom_supported(adev) || !ras)
+ return;
+ control = &ras->eeprom_control;
+ if (!control->is_eeprom_valid)
+ return;
+ res = __verify_ras_table_checksum(control);
+ if (res) {
+ dev_warn(adev->dev,
+ "RAS table incorrect checksum or error:%d, try to recover\n",
+ res);
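+ /* Recover by resetting the table, re-saving the bad pages and re-verifying the checksum */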
+ if (!amdgpu_ras_eeprom_reset_table(control))
+ if (!amdgpu_ras_save_bad_pages(adev, NULL))
+ if (!__verify_ras_table_checksum(control)) {
+ dev_info(adev->dev, "RAS table recovery succeed\n");
+ return;
+ }
+ dev_err(adev->dev, "RAS table recovery failed\n");
+ control->is_eeprom_valid = false;
+ }
+ return;
+}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
new file mode 100644
index 000000000000..ebfca4cb5688
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _AMDGPU_RAS_EEPROM_H
+#define _AMDGPU_RAS_EEPROM_H
+
+#include <linux/i2c.h>
+
+#define RAS_TABLE_VER_V1 0x00010000
+#define RAS_TABLE_VER_V2_1 0x00021000
+#define RAS_TABLE_VER_V3 0x00030000
+
+struct amdgpu_device;
+
+enum amdgpu_ras_gpu_health_status {
+ GPU_HEALTH_USABLE = 0,
+ GPU_RETIRED__ECC_REACH_THRESHOLD = 2,
+};
+
+enum amdgpu_ras_eeprom_err_type {
+ AMDGPU_RAS_EEPROM_ERR_NA,
+ AMDGPU_RAS_EEPROM_ERR_RECOVERABLE,
+ AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE,
+ AMDGPU_RAS_EEPROM_ERR_COUNT,
+};
+
+struct amdgpu_ras_eeprom_table_header {
+ uint32_t header;
+ uint32_t version;
+ uint32_t first_rec_offset;
+ uint32_t tbl_size;
+ uint32_t checksum;
+} __packed;
+
+struct amdgpu_ras_eeprom_table_ras_info {
+ u8 rma_status;
+ u8 health_percent;
+ u16 ecc_page_threshold;
+ u32 padding[64 - 1];
+} __packed;
+
+struct amdgpu_ras_eeprom_control {
+ struct amdgpu_ras_eeprom_table_header tbl_hdr;
+
+ struct amdgpu_ras_eeprom_table_ras_info tbl_rai;
+
+ /* Base I2C EEPROM 19-bit memory address,
+ * where the table is located. For more information,
+ * see top of amdgpu_eeprom.c.
+ */
+ u32 i2c_address;
+
+ /* The byte offset from @i2c_address
+ * where the table header is found,
+ * and where the records start--always
+ * right after the header.
+ */
+ u32 ras_header_offset;
+ u32 ras_info_offset;
+ u32 ras_record_offset;
+
+ /* Number of records in the table.
+ */
+ u32 ras_num_recs;
+
+ /* Number of bad pages: either ras_num_recs or
+ * ras_num_recs * umc.retire_unit.
+ */
+ u32 ras_num_bad_pages;
+
+ /* Number of records that store an MCA address */
+ u32 ras_num_mca_recs;
+
+ /* Number of records that store a physical address */
+ u32 ras_num_pa_recs;
+
+ /* First record index to read, 0-based.
+ * Range is [0, num_recs-1]. This is
+ * an absolute index, starting right after
+ * the table header.
+ */
+ u32 ras_fri;
+
+ /* Maximum possible number of records
+ * we could store, i.e. the maximum capacity
+ * of the table.
+ */
+ u32 ras_max_record_count;
+
+ /* Protect table access via this mutex.
+ */
+ struct mutex ras_tbl_mutex;
+
+ /* Bitmap of the channels on which bad pages occurred
+ */
+ u32 bad_channel_bitmap;
+
+ bool is_eeprom_valid;
+};
+
+/*
+ * Represents a single table record. Packed to be easily serialized into a byte
+ * stream.
+ */
+struct eeprom_table_record {
+
+ union {
+ uint64_t address;
+ uint64_t offset;
+ };
+
+ uint64_t retired_page;
+ uint64_t ts;
+
+ enum amdgpu_ras_eeprom_err_type err_type;
+
+ union {
+ unsigned char bank;
+ unsigned char cu;
+ };
+
+ unsigned char mem_channel;
+ unsigned char mcumc_id;
+} __packed;
+
+int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control);
+
+int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
+
+bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev);
+
+int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, const u32 num);
+
+int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
+ struct eeprom_table_record *records, const u32 num);
+
+uint32_t amdgpu_ras_eeprom_max_record_count(struct amdgpu_ras_eeprom_control *control);
+
+void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control);
+
+int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control);
+
+void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev);
+
+extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops;
+extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops;
+
+#endif // _AMDGPU_RAS_EEPROM_H
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
new file mode 100644
index 000000000000..be2e56ce1355
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef __AMDGPU_RES_CURSOR_H__
+#define __AMDGPU_RES_CURSOR_H__
+
+#include <drm/drm_mm.h>
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_range_manager.h>
+
+#include "amdgpu_vram_mgr.h"
+
+/* cursor state for walking over vram_mgr and gtt_mgr allocations */
+struct amdgpu_res_cursor {
+ uint64_t start;
+ uint64_t size;
+ uint64_t remaining;
+ void *node;
+ uint32_t mem_type;
+};
+
+/**
+ * amdgpu_res_first - initialize an amdgpu_res_cursor
+ *
+ * @res: TTM resource object to walk
+ * @start: Start of the range
+ * @size: Size of the range
+ * @cur: cursor object to initialize
+ *
+ * Start walking over the range of allocations between @start and @size.
+ */
+static inline void amdgpu_res_first(struct ttm_resource *res,
+ uint64_t start, uint64_t size,
+ struct amdgpu_res_cursor *cur)
+{
+ struct drm_buddy_block *block;
+ struct list_head *head, *next;
+ struct drm_mm_node *node;
+
+ if (!res)
+ goto fallback;
+
+ BUG_ON(start + size > res->size);
+
+ cur->mem_type = res->mem_type;
+
+ switch (cur->mem_type) {
+ case TTM_PL_VRAM:
+ head = &to_amdgpu_vram_mgr_resource(res)->blocks;
+
+ block = list_first_entry_or_null(head,
+ struct drm_buddy_block,
+ link);
+ if (!block)
+ goto fallback;
+
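+ /* Skip whole blocks until @start falls inside the current block */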
+ while (start >= amdgpu_vram_mgr_block_size(block)) {
+ start -= amdgpu_vram_mgr_block_size(block);
+
+ next = block->link.next;
+ if (next != head)
+ block = list_entry(next, struct drm_buddy_block, link);
+ }
+
+ cur->start = amdgpu_vram_mgr_block_start(block) + start;
+ cur->size = min(amdgpu_vram_mgr_block_size(block) - start, size);
+ cur->remaining = size;
+ cur->node = block;
+ break;
+ case TTM_PL_TT:
+ case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
+ node = to_ttm_range_mgr_node(res)->mm_nodes;
+ while (start >= node->size << PAGE_SHIFT)
+ start -= node++->size << PAGE_SHIFT;
+
+ cur->start = (node->start << PAGE_SHIFT) + start;
+ cur->size = min((node->size << PAGE_SHIFT) - start, size);
+ cur->remaining = size;
+ cur->node = node;
+ break;
+ default:
+ goto fallback;
+ }
+
+ return;
+
+fallback:
+ cur->start = start;
+ cur->size = size;
+ cur->remaining = size;
+ cur->node = NULL;
+ WARN_ON(res && start + size > res->size);
+}
+
+/**
+ * amdgpu_res_next - advance the cursor
+ *
+ * @cur: the cursor to advance
+ * @size: number of bytes to move forward
+ *
+ * Move the cursor @size bytes forward, walking to the next node if necessary.
+ */
+static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
+{
+ struct drm_buddy_block *block;
+ struct drm_mm_node *node;
+ struct list_head *next;
+
+ BUG_ON(size > cur->remaining);
+
+ cur->remaining -= size;
+ if (!cur->remaining)
+ return;
+
+ cur->size -= size;
+ if (cur->size) {
+ cur->start += size;
+ return;
+ }
+
+ switch (cur->mem_type) {
+ case TTM_PL_VRAM:
+ block = cur->node;
+
+ next = block->link.next;
+ block = list_entry(next, struct drm_buddy_block, link);
+
+ cur->node = block;
+ cur->start = amdgpu_vram_mgr_block_start(block);
+ cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining);
+ break;
+ case TTM_PL_TT:
+ case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
+ node = cur->node;
+
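+ /* Advance to the next contiguous drm_mm node */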
+ cur->node = ++node;
+ cur->start = node->start << PAGE_SHIFT;
+ cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+ break;
+ default:
+ return;
+ }
+}
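+
+/*
+ * Typical usage (a sketch; handle_chunk() is an illustrative placeholder
+ * for whatever per-chunk processing the caller does):
+ *
+ * struct amdgpu_res_cursor cursor;
+ *
+ * amdgpu_res_first(res, 0, res->size, &cursor);
+ * while (cursor.remaining) {
+ * handle_chunk(cursor.start, cursor.size);
+ * amdgpu_res_next(&cursor, cursor.size);
+ * }
+ */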
+
+/**
+ * amdgpu_res_cleared - check if blocks are cleared
+ *
+ * @cur: the cursor to extract the block
+ *
+ * Check if the @cur block is cleared
+ */
+static inline bool amdgpu_res_cleared(struct amdgpu_res_cursor *cur)
+{
+ struct drm_buddy_block *block;
+
+ switch (cur->mem_type) {
+ case TTM_PL_VRAM:
+ block = cur->node;
+
+ if (!amdgpu_vram_mgr_is_cleared(block))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
new file mode 100644
index 000000000000..28c4ad62f50e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu_reset.h"
+#include "aldebaran.h"
+#include "sienna_cichlid.h"
+#include "smu_v13_0_10.h"
+
+static int amdgpu_reset_xgmi_reset_on_init_suspend(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ if (!adev->ip_blocks[i].status.hw)
+ continue;
+ /* displays are handled in phase1 */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
+ continue;
+
+ /* XXX handle errors */
+ amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
+ adev->ip_blocks[i].status.hw = false;
+ }
+
+ /* The VCN FW shared region is in the framebuffer; some flags are
+ * initialized in that region during sw_init. Make sure the region is
+ * backed up.
+ */
+ amdgpu_vcn_save_vcpu_bo(adev);
+
+ return 0;
+}
+
+static int amdgpu_reset_xgmi_reset_on_init_prep_hwctxt(
+ struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *tmp_adev;
+ int r;
+
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ amdgpu_unregister_gpu_instance(tmp_adev);
+ r = amdgpu_reset_xgmi_reset_on_init_suspend(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "xgmi reset on init: prepare for reset failed");
+ return r;
+ }
+ }
+
+ return r;
+}
+
+static int amdgpu_reset_xgmi_reset_on_init_restore_hwctxt(
+ struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *tmp_adev = NULL;
+ int r;
+
+ r = amdgpu_device_reinit_after_reset(reset_context);
+ if (r)
+ return r;
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ if (!tmp_adev->kfd.init_complete) {
+ kgd2kfd_init_zone_device(tmp_adev);
+ amdgpu_amdkfd_device_init(tmp_adev);
+ amdgpu_amdkfd_drm_client_create(tmp_adev);
+ }
+ }
+
+ return r;
+}
+
+static int amdgpu_reset_xgmi_reset_on_init_perform_reset(
+ struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *tmp_adev = NULL;
+ int r;
+
+ dev_dbg(adev->dev, "xgmi roi - hw reset\n");
+
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ mutex_lock(&tmp_adev->reset_cntl->reset_lock);
+ tmp_adev->reset_cntl->active_reset =
+ amdgpu_asic_reset_method(adev);
+ }
+ r = 0;
+ /* Mode1 reset needs to be triggered on all devices together */
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ /* For XGMI run all resets in parallel to speed up the process */
+ if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
+ r = -EALREADY;
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "xgmi reset on init: reset failed with error, %d",
+ r);
+ break;
+ }
+ }
+
+ /* For XGMI wait for all resets to complete before proceeding */
+ if (!r) {
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ flush_work(&tmp_adev->xgmi_reset_work);
+ r = tmp_adev->asic_reset_res;
+ if (r)
+ break;
+ }
+ }
+
+ list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
+ mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
+ tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
+ }
+
+ return r;
+}
+
+int amdgpu_reset_do_xgmi_reset_on_init(
+ struct amdgpu_reset_context *reset_context)
+{
+ struct list_head *reset_device_list = reset_context->reset_device_list;
+ struct amdgpu_device *adev;
+ int r;
+
+ if (!reset_device_list || list_empty(reset_device_list) ||
+ list_is_singular(reset_device_list))
+ return -EINVAL;
+
+ adev = list_first_entry(reset_device_list, struct amdgpu_device,
+ reset_list);
+ r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
+ if (r)
+ return r;
+
+ r = amdgpu_reset_perform_reset(adev, reset_context);
+
+ return r;
+}
+
+struct amdgpu_reset_handler xgmi_reset_on_init_handler = {
+ .reset_method = AMD_RESET_METHOD_ON_INIT,
+ .prepare_env = NULL,
+ .prepare_hwcontext = amdgpu_reset_xgmi_reset_on_init_prep_hwctxt,
+ .perform_reset = amdgpu_reset_xgmi_reset_on_init_perform_reset,
+ .restore_hwcontext = amdgpu_reset_xgmi_reset_on_init_restore_hwctxt,
+ .restore_env = NULL,
+ .do_reset = NULL,
+};
+
+int amdgpu_reset_init(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ ret = aldebaran_reset_init(adev);
+ break;
+ case IP_VERSION(11, 0, 7):
+ ret = sienna_cichlid_reset_init(adev);
+ break;
+ case IP_VERSION(13, 0, 10):
+ ret = smu_v13_0_10_reset_init(adev);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int amdgpu_reset_fini(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 6):
+ case IP_VERSION(13, 0, 12):
+ case IP_VERSION(13, 0, 14):
+ ret = aldebaran_reset_fini(adev);
+ break;
+ case IP_VERSION(11, 0, 7):
+ ret = sienna_cichlid_reset_fini(adev);
+ break;
+ case IP_VERSION(13, 0, 10):
+ ret = smu_v13_0_10_reset_fini(adev);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_reset_handler *reset_handler = NULL;
+
+ if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
+ reset_handler = adev->reset_cntl->get_reset_handler(
+ adev->reset_cntl, reset_context);
+ if (!reset_handler)
+ return -EOPNOTSUPP;
+
+ return reset_handler->prepare_hwcontext(adev->reset_cntl,
+ reset_context);
+}
+
+int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context)
+{
+ int ret;
+ struct amdgpu_reset_handler *reset_handler = NULL;
+
+ if (adev->reset_cntl)
+ reset_handler = adev->reset_cntl->get_reset_handler(
+ adev->reset_cntl, reset_context);
+ if (!reset_handler)
+ return -EOPNOTSUPP;
+
+ ret = reset_handler->perform_reset(adev->reset_cntl, reset_context);
+ if (ret)
+ return ret;
+
+ return reset_handler->restore_hwcontext(adev->reset_cntl,
+ reset_context);
+}
+
+
+void amdgpu_reset_destroy_reset_domain(struct kref *ref)
+{
+ struct amdgpu_reset_domain *reset_domain = container_of(ref,
+ struct amdgpu_reset_domain,
+ refcount);
+ if (reset_domain->wq)
+ destroy_workqueue(reset_domain->wq);
+
+ kvfree(reset_domain);
+}
+
+struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type,
+ char *wq_name)
+{
+ struct amdgpu_reset_domain *reset_domain;
+
+ reset_domain = kvzalloc(sizeof(struct amdgpu_reset_domain), GFP_KERNEL);
+ if (!reset_domain) {
+ DRM_ERROR("Failed to allocate amdgpu_reset_domain!");
+ return NULL;
+ }
+
+ reset_domain->type = type;
+ kref_init(&reset_domain->refcount);
+
+ reset_domain->wq = create_singlethread_workqueue(wq_name);
+ if (!reset_domain->wq) {
+ DRM_ERROR("Failed to allocate wq for amdgpu_reset_domain!");
+ amdgpu_reset_put_reset_domain(reset_domain);
+ return NULL;
+
+ }
+
+ atomic_set(&reset_domain->in_gpu_reset, 0);
+ atomic_set(&reset_domain->reset_res, 0);
+ init_rwsem(&reset_domain->sem);
+
+ return reset_domain;
+}
+
+void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain)
+{
+ atomic_set(&reset_domain->in_gpu_reset, 1);
+ down_write(&reset_domain->sem);
+}
+
+
+void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain)
+{
+ atomic_set(&reset_domain->in_gpu_reset, 0);
+ up_write(&reset_domain->sem);
+}
+
+void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf,
+ size_t len)
+{
+ if (!buf || !len)
+ return;
+
+ switch (rst_ctxt->src) {
+ case AMDGPU_RESET_SRC_JOB:
+ if (rst_ctxt->job) {
+ snprintf(buf, len, "job hang on ring:%s",
+ rst_ctxt->job->base.sched->name);
+ } else {
+ strscpy(buf, "job hang", len);
+ }
+ break;
+ case AMDGPU_RESET_SRC_RAS:
+ strscpy(buf, "RAS error", len);
+ break;
+ case AMDGPU_RESET_SRC_MES:
+ strscpy(buf, "MES hang", len);
+ break;
+ case AMDGPU_RESET_SRC_HWS:
+ strscpy(buf, "HWS hang", len);
+ break;
+ case AMDGPU_RESET_SRC_USER:
+ strscpy(buf, "user trigger", len);
+ break;
+ case AMDGPU_RESET_SRC_USERQ:
+ strscpy(buf, "user queue trigger", len);
+ break;
+ default:
+ strscpy(buf, "unknown", len);
+ }
+}
+
+bool amdgpu_reset_in_recovery(struct amdgpu_device *adev)
+{
+ return (adev->init_lvl->level == AMDGPU_INIT_LEVEL_RESET_RECOVERY);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
new file mode 100644
index 000000000000..07b4d37f1db6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RESET_H__
+#define __AMDGPU_RESET_H__
+
+#include "amdgpu.h"
+
+#define AMDGPU_RESET_MAX_HANDLERS 5
+
+enum AMDGPU_RESET_FLAGS {
+
+ AMDGPU_NEED_FULL_RESET = 0,
+ AMDGPU_SKIP_HW_RESET = 1,
+ AMDGPU_SKIP_COREDUMP = 2,
+ AMDGPU_HOST_FLR = 3,
+};
+
+enum AMDGPU_RESET_SRCS {
+ AMDGPU_RESET_SRC_UNKNOWN,
+ AMDGPU_RESET_SRC_JOB,
+ AMDGPU_RESET_SRC_RAS,
+ AMDGPU_RESET_SRC_MES,
+ AMDGPU_RESET_SRC_HWS,
+ AMDGPU_RESET_SRC_USER,
+ AMDGPU_RESET_SRC_USERQ,
+};
+
+struct amdgpu_reset_context {
+ enum amd_reset_method method;
+ struct amdgpu_device *reset_req_dev;
+ struct amdgpu_job *job;
+ struct amdgpu_hive_info *hive;
+ struct list_head *reset_device_list;
+ unsigned long flags;
+ enum AMDGPU_RESET_SRCS src;
+};
+
+struct amdgpu_reset_handler {
+ enum amd_reset_method reset_method;
+ int (*prepare_env)(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+ int (*prepare_hwcontext)(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+ int (*perform_reset)(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+ int (*restore_hwcontext)(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+ int (*restore_env)(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+
+ int (*do_reset)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_reset_control {
+ void *handle;
+ struct work_struct reset_work;
+ struct mutex reset_lock;
+ struct amdgpu_reset_handler *(
+ *reset_handlers)[AMDGPU_RESET_MAX_HANDLERS];
+ atomic_t in_reset;
+ enum amd_reset_method active_reset;
+ struct amdgpu_reset_handler *(*get_reset_handler)(
+ struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *context);
+ void (*async_reset)(struct work_struct *work);
+};
+
+
+enum amdgpu_reset_domain_type {
+ SINGLE_DEVICE,
+ XGMI_HIVE
+};
+
+struct amdgpu_reset_domain {
+ struct kref refcount;
+ struct workqueue_struct *wq;
+ enum amdgpu_reset_domain_type type;
+ struct rw_semaphore sem;
+ atomic_t in_gpu_reset;
+ atomic_t reset_res;
+};
+
+int amdgpu_reset_init(struct amdgpu_device *adev);
+int amdgpu_reset_fini(struct amdgpu_device *adev);
+
+int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context);
+
+int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context);
+
+int amdgpu_reset_prepare_env(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context);
+int amdgpu_reset_restore_env(struct amdgpu_device *adev,
+ struct amdgpu_reset_context *reset_context);
+
+struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type,
+ char *wq_name);
+
+void amdgpu_reset_destroy_reset_domain(struct kref *ref);
+
+static inline bool amdgpu_reset_get_reset_domain(struct amdgpu_reset_domain *domain)
+{
+ return kref_get_unless_zero(&domain->refcount) != 0;
+}
+
+static inline void amdgpu_reset_put_reset_domain(struct amdgpu_reset_domain *domain)
+{
+ if (domain)
+ kref_put(&domain->refcount, amdgpu_reset_destroy_reset_domain);
+}
+
+static inline bool amdgpu_reset_domain_schedule(struct amdgpu_reset_domain *domain,
+ struct work_struct *work)
+{
+ return queue_work(domain->wq, work);
+}
+
+static inline bool amdgpu_reset_pending(struct amdgpu_reset_domain *domain)
+{
+ lockdep_assert_held(&domain->sem);
+ return rwsem_is_contended(&domain->sem);
+}
+
+void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain);
+
+void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain);
+
+void amdgpu_reset_get_desc(struct amdgpu_reset_context *rst_ctxt, char *buf,
+ size_t len);
+
+#define for_each_handler(i, handler, reset_ctl) \
+ for (i = 0; (i < AMDGPU_RESET_MAX_HANDLERS) && \
+ (handler = (*reset_ctl->reset_handlers)[i]); \
+ ++i)
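+
+/*
+ * Example (a sketch): scan the registered handlers for one that implements
+ * a given reset method; "reset_ctl" and "method" stand in for the caller's
+ * locals.
+ *
+ * int i;
+ * struct amdgpu_reset_handler *handler;
+ *
+ * for_each_handler(i, handler, reset_ctl)
+ * if (handler->reset_method == method)
+ * return handler;
+ */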
+
+extern struct amdgpu_reset_handler xgmi_reset_on_init_handler;
+int amdgpu_reset_do_xgmi_reset_on_init(
+ struct amdgpu_reset_context *reset_context);
+
+bool amdgpu_reset_in_recovery(struct amdgpu_device *adev);
+
+static inline void amdgpu_reset_set_dpc_status(struct amdgpu_device *adev,
+ bool status)
+{
+ adev->pcie_reset_ctx.occurs_dpc = status;
+ adev->no_hw_access = status;
+}
+
+static inline bool amdgpu_reset_in_dpc(struct amdgpu_device *adev)
+{
+ return adev->pcie_reset_ctx.occurs_dpc;
+}
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 6a85db0c0bc3..8f6ce948c684 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -28,8 +28,9 @@
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/uaccess.h>
#include <linux/debugfs.h>
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
@@ -47,21 +48,37 @@
* wptr. The GPU then starts fetching commands and executes
* them until the pointers are equal again.
*/
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
- struct amdgpu_ring *ring);
-static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
+
+/**
+ * amdgpu_ring_max_ibs - Return max IBs that fit in a single submission.
+ *
+ * @type: ring type for which to return the limit.
+ */
+unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type)
+{
+ switch (type) {
+ case AMDGPU_RING_TYPE_GFX:
+ /* Need to keep at least 192 on GFX7+ for old radv. */
+ return 192;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ return 125;
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ return 16;
+ default:
+ return 49;
+ }
+}
/**
* amdgpu_ring_alloc - allocate space on the ring buffer
*
- * @adev: amdgpu_device pointer
* @ring: amdgpu_ring structure holding ring information
* @ndw: number of dwords to allocate in the ring buffer
*
* Allocate @ndw dwords in the ring buffer (all asics).
* Returns 0 on success, error on failure.
*/
-int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
+int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
{
/* Align requested size with padding so unlock_commit can
* pad safely */
@@ -82,6 +99,29 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
return 0;
}
+/**
+ * amdgpu_ring_alloc_reemit - allocate space on the ring buffer for reemit
+ *
+ * @ring: amdgpu_ring structure holding ring information
+ * @ndw: number of dwords to allocate in the ring buffer
+ *
+ * Allocate @ndw dwords in the ring buffer (all asics).
+ * Doesn't check the max_dw limit since we may be re-emitting
+ * several submissions.
+ */
+static void amdgpu_ring_alloc_reemit(struct amdgpu_ring *ring, unsigned int ndw)
+{
+ /* Align requested size with padding so unlock_commit can
+ * pad safely */
+ ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
+
+ ring->count_dw = ndw;
+ ring->wptr_old = ring->wptr;
+
+ if (ring->funcs->begin_use)
+ ring->funcs->begin_use(ring);
+}
+
/** amdgpu_ring_insert_nop - insert NOP packets
*
* @ring: amdgpu_ring structure holding ring information
@@ -91,13 +131,26 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
*/
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- int i;
+ uint32_t occupied, chunk1, chunk2;
- for (i = 0; i < count; i++)
- amdgpu_ring_write(ring, ring->funcs->nop);
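+ /* Fill the NOPs in at most two chunks so the fill handles ring buffer wrap-around */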
+ occupied = ring->wptr & ring->buf_mask;
+ chunk1 = ring->buf_mask + 1 - occupied;
+ chunk1 = (chunk1 >= count) ? count : chunk1;
+ chunk2 = count - chunk1;
+
+ if (chunk1)
+ memset32(&ring->ring[occupied], ring->funcs->nop, chunk1);
+
+ if (chunk2)
+ memset32(ring->ring, ring->funcs->nop, chunk2);
+
+ ring->wptr += count;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw -= count;
}
-/** amdgpu_ring_generic_pad_ib - pad IB with NOP packets
+/**
+ * amdgpu_ring_generic_pad_ib - pad IB with NOP packets
*
* @ring: amdgpu_ring structure holding ring information
* @ib: IB to add NOP packets to
@@ -114,7 +167,6 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
* amdgpu_ring_commit - tell the GPU to execute the new
* commands on the ring buffer
*
- * @adev: amdgpu_device pointer
* @ring: amdgpu_ring structure holding ring information
*
* Update the wptr (write pointer) to tell the GPU to
@@ -124,11 +176,16 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
uint32_t count;
+ if (ring->count_dw < 0)
+ DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
+
/* We pad to match fetch size */
count = ring->funcs->align_mask + 1 -
(ring->wptr & ring->funcs->align_mask);
- count %= ring->funcs->align_mask + 1;
- ring->funcs->insert_nop(ring, count);
+ count &= ring->funcs->align_mask;
+
+ if (count != 0)
+ ring->funcs->insert_nop(ring, count);
mb();
amdgpu_ring_set_wptr(ring);
@@ -152,146 +209,280 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
ring->funcs->end_use(ring);
}
+#define amdgpu_ring_get_gpu_addr(ring, offset) \
+ (ring->adev->wb.gpu_addr + offset * 4)
+
+#define amdgpu_ring_get_cpu_addr(ring, offset) \
+ (&ring->adev->wb.wb[offset])
+
/**
* amdgpu_ring_init - init driver ring struct.
*
* @adev: amdgpu_device pointer
* @ring: amdgpu_ring structure holding ring information
- * @max_ndw: maximum number of dw for ring alloc
- * @nop: nop packet for this ring
+ * @max_dw: maximum number of dw for ring alloc
+ * @irq_src: interrupt source to use for this ring
+ * @irq_type: interrupt type to use for this ring
+ * @hw_prio: ring priority (NORMAL/HIGH)
+ * @sched_score: optional score atomic shared with other schedulers
*
* Initialize the driver information for the selected ring (all asics).
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned max_dw, struct amdgpu_irq_src *irq_src,
- unsigned irq_type)
+ unsigned int max_dw, struct amdgpu_irq_src *irq_src,
+ unsigned int irq_type, unsigned int hw_prio,
+ atomic_t *sched_score)
{
int r;
+ int sched_hw_submission = amdgpu_sched_hw_submission;
+ u32 *num_sched;
+ u32 hw_ip;
+ unsigned int max_ibs_dw;
+
+ /* Set the hw submission limit higher for KIQ because
+ * it's used for a number of gfx/compute tasks by both
+ * KFD and KGD which may have outstanding fences and
+ * it doesn't really use the gpu scheduler anyway;
+ * KIQ tasks get submitted directly to the ring.
+ */
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ sched_hw_submission = max(sched_hw_submission, 256);
+ if (ring->funcs->type == AMDGPU_RING_TYPE_MES)
+ sched_hw_submission = 8;
+ else if (ring == &adev->sdma.instance[0].page)
+ sched_hw_submission = 256;
if (ring->adev == NULL) {
if (adev->num_rings >= AMDGPU_MAX_RINGS)
return -EINVAL;
ring->adev = adev;
+ ring->num_hw_submission = sched_hw_submission;
+ ring->sched_score = sched_score;
+ ring->vmid_wait = dma_fence_get_stub();
+
ring->idx = adev->num_rings++;
adev->rings[ring->idx] = ring;
- r = amdgpu_fence_driver_init_ring(ring,
- amdgpu_sched_hw_submission);
+
+ r = amdgpu_fence_driver_init_ring(ring);
if (r)
return r;
}
- if (ring->funcs->support_64bit_ptrs) {
- r = amdgpu_wb_get_64bit(adev, &ring->rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
- return r;
- }
-
- r = amdgpu_wb_get_64bit(adev, &ring->wptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
- return r;
- }
-
- } else {
- r = amdgpu_wb_get(adev, &ring->rptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
- return r;
- }
-
- r = amdgpu_wb_get(adev, &ring->wptr_offs);
- if (r) {
- dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
- return r;
- }
+ r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
+ return r;
+ }
+ r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
+ if (r) {
+ dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
+ return r;
}
- r = amdgpu_wb_get(adev, &ring->fence_offs);
+ r = amdgpu_device_wb_get(adev, &ring->fence_offs);
if (r) {
dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
return r;
}
- r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
+ r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
if (r) {
- dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
+ dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
return r;
}
- ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
- ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
- /* always set cond_exec_polling to CONTINUE */
- *ring->cond_exe_cpu_addr = 1;
- r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
+ r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
if (r) {
- dev_err(adev->dev, "failed initializing fences (%d).\n", r);
+ dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
return r;
}
- ring->ring_size = roundup_pow_of_two(max_dw * 4 *
- amdgpu_sched_hw_submission);
+ ring->fence_gpu_addr =
+ amdgpu_ring_get_gpu_addr(ring, ring->fence_offs);
+ ring->fence_cpu_addr =
+ amdgpu_ring_get_cpu_addr(ring, ring->fence_offs);
+
+ ring->rptr_gpu_addr =
+ amdgpu_ring_get_gpu_addr(ring, ring->rptr_offs);
+ ring->rptr_cpu_addr =
+ amdgpu_ring_get_cpu_addr(ring, ring->rptr_offs);
+
+ ring->wptr_gpu_addr =
+ amdgpu_ring_get_gpu_addr(ring, ring->wptr_offs);
+ ring->wptr_cpu_addr =
+ amdgpu_ring_get_cpu_addr(ring, ring->wptr_offs);
+
+ ring->trail_fence_gpu_addr =
+ amdgpu_ring_get_gpu_addr(ring, ring->trail_fence_offs);
+ ring->trail_fence_cpu_addr =
+ amdgpu_ring_get_cpu_addr(ring, ring->trail_fence_offs);
+
+ ring->cond_exe_gpu_addr =
+ amdgpu_ring_get_gpu_addr(ring, ring->cond_exe_offs);
+ ring->cond_exe_cpu_addr =
+ amdgpu_ring_get_cpu_addr(ring, ring->cond_exe_offs);
+
+ /* always set cond_exec_polling to CONTINUE */
+ *ring->cond_exe_cpu_addr = 1;
+
+ if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
+ r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
+ if (r) {
+ dev_err(adev->dev, "failed initializing fences (%d).\n", r);
+ return r;
+ }
+
+ max_ibs_dw = ring->funcs->emit_frame_size +
+ amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
+ max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;
+
+ if (WARN_ON(max_ibs_dw > max_dw))
+ max_dw = max_ibs_dw;
+
+ ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);
+ } else {
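+ /* CPER rings skip fence driver setup; size the buffer directly from max_dw */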
+ ring->ring_size = roundup_pow_of_two(max_dw * 4);
+ ring->count_dw = (ring->ring_size - 4) >> 2;
+ /* ring buffer is empty now */
+ ring->wptr = *ring->rptr_cpu_addr = 0;
+ }
ring->buf_mask = (ring->ring_size / 4) - 1;
ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
0xffffffffffffffff : ring->buf_mask;
+ /* Initialize cached_rptr to 0 */
+ ring->cached_rptr = 0;
+
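+ /* Backup buffer used to save and re-emit unprocessed commands after a ring reset */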
+ if (!ring->ring_backup) {
+ ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL);
+ if (!ring->ring_backup)
+ return -ENOMEM;
+ }
+
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
- r = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+ r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes,
+ PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT,
&ring->ring_obj,
&ring->gpu_addr,
(void **)&ring->ring);
if (r) {
dev_err(adev->dev, "(%d) ring create failed\n", r);
+ kvfree(ring->ring_backup);
return r;
}
amdgpu_ring_clear_ring(ring);
}
ring->max_dw = max_dw;
+ ring->hw_prio = hw_prio;
- if (amdgpu_debugfs_ring_init(adev, ring)) {
- DRM_ERROR("Failed to register debugfs file for rings !\n");
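+ /* Hook this ring's scheduler into the per-IP-type, per-priority scheduler table */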
+ if (!ring->no_scheduler && ring->funcs->type < AMDGPU_HW_IP_NUM) {
+ hw_ip = ring->funcs->type;
+ num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+ adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
+ &ring->sched;
}
+
return 0;
}
/**
* amdgpu_ring_fini - tear down the driver ring struct.
*
- * @adev: amdgpu_device pointer
* @ring: amdgpu_ring structure holding ring information
*
* Tear down the driver information for the selected ring (all asics).
*/
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
- ring->ready = false;
- if (ring->funcs->support_64bit_ptrs) {
- amdgpu_wb_free_64bit(ring->adev, ring->cond_exe_offs);
- amdgpu_wb_free_64bit(ring->adev, ring->fence_offs);
- amdgpu_wb_free_64bit(ring->adev, ring->rptr_offs);
- amdgpu_wb_free_64bit(ring->adev, ring->wptr_offs);
- } else {
- amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
- amdgpu_wb_free(ring->adev, ring->fence_offs);
- amdgpu_wb_free(ring->adev, ring->rptr_offs);
- amdgpu_wb_free(ring->adev, ring->wptr_offs);
- }
+ /* Don't tear down a ring that was never initialized */
+ if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
+ return;
+ ring->sched.ready = false;
+
+ amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
+ amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
+
+ amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
+ amdgpu_device_wb_free(ring->adev, ring->fence_offs);
amdgpu_bo_free_kernel(&ring->ring_obj,
&ring->gpu_addr,
(void **)&ring->ring);
+ kvfree(ring->ring_backup);
+ ring->ring_backup = NULL;
- amdgpu_debugfs_ring_fini(ring);
+ dma_fence_put(ring->vmid_wait);
+ ring->vmid_wait = NULL;
+ ring->me = 0;
+}
- ring->adev->rings[ring->idx] = NULL;
+/**
+ * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
+ *
+ * @ring: ring to write to
+ * @reg0: register to write
+ * @reg1: register to wait on
+ * @ref: reference value to write/wait on
+ * @mask: mask to wait on
+ *
+ * Helper for rings that don't support write and wait in a
+ * single oneshot packet.
+ */
+void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask)
+{
+ amdgpu_ring_emit_wreg(ring, reg0, ref);
+ amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
+}
+
+/**
+ * amdgpu_ring_soft_recovery - try to soft recover a ring lockup
+ *
+ * @ring: ring to try the recovery on
+ * @vmid: VMID we try to get going again
+ * @fence: timedout fence
+ *
+ * Tries to get a ring proceeding again when it is stuck.
+ */
+bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
+ struct dma_fence *fence)
+{
+ unsigned long flags;
+ ktime_t deadline;
+ bool ret;
+
+ if (unlikely(ring->adev->debug_disable_soft_recovery))
+ return false;
+
+ deadline = ktime_add_us(ktime_get(), 10000);
+
+ if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
+ return false;
+
+ spin_lock_irqsave(fence->lock, flags);
+ if (!dma_fence_is_signaled_locked(fence))
+ dma_fence_set_error(fence, -ENODATA);
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ while (!dma_fence_is_signaled(fence) &&
+ ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0)
+ ring->funcs->soft_recovery(ring, vmid);
+
+ ret = dma_fence_is_signaled(fence);
+ /* increment the counter only if soft reset worked */
+ if (ret)
+ atomic_inc(&ring->adev->gpu_reset_counter);
+
+ return ret;
}
/*
@@ -310,8 +501,10 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_ring *ring = file_inode(f)->i_private;
- int r, i;
uint32_t value, result, early[3];
+ uint64_t p;
+ loff_t i;
+ int r;
if (*pos & 3 || size & 3)
return -EINVAL;
@@ -319,13 +512,18 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
result = 0;
if (*pos < 12) {
- early[0] = amdgpu_ring_get_rptr(ring);
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
+ mutex_lock(&ring->adev->cper.ring_lock);
+
+ early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
early[2] = ring->wptr & ring->buf_mask;
for (i = *pos / 4; i < 3 && size; i++) {
r = put_user(early[i], (uint32_t *)buf);
- if (r)
- return r;
+ if (r) {
+ result = r;
+ goto out;
+ }
buf += 4;
result += 4;
size -= 4;
@@ -333,56 +531,328 @@ static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
}
}
- while (size) {
- if (*pos >= (ring->ring_size + 12))
- return result;
+ if (ring->funcs->type != AMDGPU_RING_TYPE_CPER) {
+ while (size) {
+ if (*pos >= (ring->ring_size + 12))
+ return result;
- value = ring->ring[(*pos - 12)/4];
- r = put_user(value, (uint32_t*)buf);
- if (r)
- return r;
- buf += 4;
- result += 4;
- size -= 4;
- *pos += 4;
+ value = ring->ring[(*pos - 12)/4];
+ r = put_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+ buf += 4;
+ result += 4;
+ size -= 4;
+ *pos += 4;
+ }
+ } else {
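+ /* CPER ring: only the region between rptr and wptr holds valid data, so walk from rptr to wptr */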
+ p = early[0];
+ if (early[0] <= early[1])
+ size = (early[1] - early[0]);
+ else
+ size = ring->ring_size - (early[0] - early[1]);
+
+ while (size) {
+ if (p == early[1])
+ goto out;
+
+ value = ring->ring[p];
+ r = put_user(value, (uint32_t *)buf);
+ if (r) {
+ result = r;
+ goto out;
+ }
+
+ buf += 4;
+ result += 4;
+ size--;
+ p++;
+ p &= ring->ptr_mask;
+ }
}
+out:
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
+ mutex_unlock(&ring->adev->cper.ring_lock);
+
return result;
}
+static ssize_t amdgpu_debugfs_virt_ring_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
+
+ if (*pos & 3 || size & 3)
+ return -EINVAL;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_CPER)
+ amdgpu_virt_req_ras_cper_dump(ring->adev, false);
+
+ return amdgpu_debugfs_ring_read(f, buf, size, pos);
+}
+
static const struct file_operations amdgpu_debugfs_ring_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_ring_read,
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_virt_ring_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_virt_ring_read,
+ .llseek = default_llseek
+};
+
+static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_ring *ring = file_inode(f)->i_private;
+ ssize_t bytes = min_t(ssize_t, ring->mqd_size - *pos, size);
+ void *from = ((u8 *)ring->mqd_ptr) + *pos;
+
+ if (*pos > ring->mqd_size)
+ return 0;
+
+ if (copy_to_user(buf, from, bytes))
+ return -EFAULT;
+
+ *pos += bytes;
+ return bytes;
+}
+
+static const struct file_operations amdgpu_debugfs_mqd_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_mqd_read,
+ .llseek = default_llseek
+};
+
+static int amdgpu_debugfs_ring_error(void *data, u64 val)
+{
+ struct amdgpu_ring *ring = data;
+
+ amdgpu_fence_driver_set_error(ring, val);
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(amdgpu_debugfs_error_fops, NULL,
+ amdgpu_debugfs_ring_error, "%lld\n");
+
#endif
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
+void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
- struct drm_minor *minor = adev->ddev->primary;
- struct dentry *ent, *root = minor->debugfs_root;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
char name[32];
sprintf(name, "amdgpu_ring_%s", ring->name);
+ if (amdgpu_sriov_vf(adev))
+ debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
+ &amdgpu_debugfs_virt_ring_fops,
+ ring->ring_size + 12);
+ else
+ debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
+ &amdgpu_debugfs_ring_fops,
+ ring->ring_size + 12);
+
+ if (ring->mqd_obj) {
+ sprintf(name, "amdgpu_mqd_%s", ring->name);
+ debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
+ &amdgpu_debugfs_mqd_fops,
+ ring->mqd_size);
+ }
- ent = debugfs_create_file(name,
- S_IFREG | S_IRUGO, root,
- ring, &amdgpu_debugfs_ring_fops);
- if (!ent)
- return -ENOMEM;
+ sprintf(name, "amdgpu_error_%s", ring->name);
+ debugfs_create_file(name, 0200, root, ring,
+ &amdgpu_debugfs_error_fops);
- i_size_write(ent->d_inode, ring->ring_size + 12);
- ring->ent = ent;
#endif
+}
+
+/**
+ * amdgpu_ring_test_helper - test the ring and set sched readiness status
+ *
+ * @ring: ring to test
+ *
+ * Tests the ring and sets the scheduler readiness status accordingly.
+ *
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
+ ring->name, r);
+ else
+ DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
+ ring->name);
+
+ ring->sched.ready = !r;
+
+ return r;
+}
+
+static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
+ struct amdgpu_mqd_prop *prop)
+{
+ struct amdgpu_device *adev = ring->adev;
+ bool is_high_prio_compute = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
+ amdgpu_gfx_is_high_priority_compute_queue(adev, ring);
+ bool is_high_prio_gfx = ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
+ amdgpu_gfx_is_high_priority_graphics_queue(adev, ring);
+
+ memset(prop, 0, sizeof(*prop));
+
+ prop->mqd_gpu_addr = ring->mqd_gpu_addr;
+ prop->hqd_base_gpu_addr = ring->gpu_addr;
+ prop->rptr_gpu_addr = ring->rptr_gpu_addr;
+ prop->wptr_gpu_addr = ring->wptr_gpu_addr;
+ prop->queue_size = ring->ring_size;
+ prop->eop_gpu_addr = ring->eop_gpu_addr;
+ prop->use_doorbell = ring->use_doorbell;
+ prop->doorbell_index = ring->doorbell_index;
+ prop->kernel_queue = true;
+
+ /* The map_queues packet doesn't need to activate the queue,
+ * so only KIQ needs to set this field.
+ */
+ prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;
+
+ prop->allow_tunneling = is_high_prio_compute;
+ if (is_high_prio_compute || is_high_prio_gfx) {
+ prop->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ prop->hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ }
+}
+
+int amdgpu_ring_init_mqd(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_mqd *mqd_mgr;
+ struct amdgpu_mqd_prop prop;
+
+ amdgpu_ring_to_mqd_prop(ring, &prop);
+
+ ring->wptr = 0;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ mqd_mgr = &adev->mqds[AMDGPU_HW_IP_COMPUTE];
+ else
+ mqd_mgr = &adev->mqds[ring->funcs->type];
+
+ return mqd_mgr->init_mqd(adev, ring->mqd_ptr, &prop);
+}
+
+void amdgpu_ring_ib_begin(struct amdgpu_ring *ring)
+{
+ if (ring->is_sw_ring)
+ amdgpu_sw_ring_ib_begin(ring);
+}
+
+void amdgpu_ring_ib_end(struct amdgpu_ring *ring)
+{
+ if (ring->is_sw_ring)
+ amdgpu_sw_ring_ib_end(ring);
+}
+
+void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring)
+{
+ if (ring->is_sw_ring)
+ amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL);
+}
+
+void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring)
+{
+ if (ring->is_sw_ring)
+ amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE);
+}
+
+void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
+{
+ if (ring->is_sw_ring)
+ amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
+}
+
+bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
+{
+ if (!ring)
+ return false;
+
+ if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched))
+ return false;
+
+ return true;
+}
+
+void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ /* Stop the scheduler to prevent anybody else from touching the ring buffer. */
+ drm_sched_wqueue_stop(&ring->sched);
+ /* back up the non-guilty commands */
+ amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence);
+}
+
+int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence)
+{
+ unsigned int i;
+ int r;
+
+ /* verify that the ring is functional */
+ r = amdgpu_ring_test_ring(ring);
+ if (r)
+ return r;
+
+ /* signal the fence of the bad job */
+ if (guilty_fence)
+ amdgpu_fence_driver_guilty_force_completion(guilty_fence);
+ /* Re-emit the non-guilty commands */
+ if (ring->ring_backup_entries_to_copy) {
+ amdgpu_ring_alloc_reemit(ring, ring->ring_backup_entries_to_copy);
+ for (i = 0; i < ring->ring_backup_entries_to_copy; i++)
+ amdgpu_ring_write(ring, ring->ring_backup[i]);
+ amdgpu_ring_commit(ring);
+ }
+ /* Start the scheduler again */
+ drm_sched_wqueue_start(&ring->sched);
return 0;
}
-static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
+bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
+ u32 reset_type)
{
-#if defined(CONFIG_DEBUG_FS)
- debugfs_remove(ring->ent);
-#endif
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ if (ring->adev->gfx.gfx_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_COMPUTE:
+ if (ring->adev->gfx.compute_supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ if (ring->adev->sdma.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_DEC:
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ if (ring->adev->vcn.supported_reset & reset_type)
+ return true;
+ break;
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ if (ring->adev->jpeg.supported_reset & reset_type)
+ return true;
+ break;
+ default:
+ break;
+ }
+ return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 944443c5b90a..b6b649179776 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -24,46 +24,101 @@
#ifndef __AMDGPU_RING_H__
#define __AMDGPU_RING_H__
-#include "gpu_scheduler.h"
+#include <drm/amdgpu_drm.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/drm_print.h>
+#include <drm/drm_suballoc.h>
+
+struct amdgpu_device;
+struct amdgpu_ring;
+struct amdgpu_ib;
+struct amdgpu_cs_parser;
+struct amdgpu_job;
+struct amdgpu_vm;
/* max number of rings */
-#define AMDGPU_MAX_RINGS 18
-#define AMDGPU_MAX_GFX_RINGS 1
+#define AMDGPU_MAX_RINGS 149
+#define AMDGPU_MAX_HWIP_RINGS 64
+#define AMDGPU_MAX_GFX_RINGS 2
+#define AMDGPU_MAX_SW_GFX_RINGS 2
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 3
#define AMDGPU_MAX_UVD_ENC_RINGS 2
+#define AMDGPU_MAX_VPE_RINGS 2
+
+enum amdgpu_ring_priority_level {
+ AMDGPU_RING_PRIO_0,
+ AMDGPU_RING_PRIO_1,
+ AMDGPU_RING_PRIO_DEFAULT = 1,
+ AMDGPU_RING_PRIO_2,
+ AMDGPU_RING_PRIO_MAX
+};
/* some special values for the owner field */
-#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
-#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)
+#define AMDGPU_FENCE_OWNER_UNDEFINED ((void *)0ul)
+#define AMDGPU_FENCE_OWNER_VM ((void *)1ul)
+#define AMDGPU_FENCE_OWNER_KFD ((void *)2ul)
#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1)
+#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2)
+#define AMDGPU_FENCE_FLAG_EXEC (1 << 3)
+
+#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
+
+#define AMDGPU_IB_POOL_SIZE (1024 * 1024)
enum amdgpu_ring_type {
- AMDGPU_RING_TYPE_GFX,
- AMDGPU_RING_TYPE_COMPUTE,
- AMDGPU_RING_TYPE_SDMA,
- AMDGPU_RING_TYPE_UVD,
- AMDGPU_RING_TYPE_VCE,
+ AMDGPU_RING_TYPE_GFX = AMDGPU_HW_IP_GFX,
+ AMDGPU_RING_TYPE_COMPUTE = AMDGPU_HW_IP_COMPUTE,
+ AMDGPU_RING_TYPE_SDMA = AMDGPU_HW_IP_DMA,
+ AMDGPU_RING_TYPE_UVD = AMDGPU_HW_IP_UVD,
+ AMDGPU_RING_TYPE_VCE = AMDGPU_HW_IP_VCE,
+ AMDGPU_RING_TYPE_UVD_ENC = AMDGPU_HW_IP_UVD_ENC,
+ AMDGPU_RING_TYPE_VCN_DEC = AMDGPU_HW_IP_VCN_DEC,
+ AMDGPU_RING_TYPE_VCN_ENC = AMDGPU_HW_IP_VCN_ENC,
+ AMDGPU_RING_TYPE_VCN_JPEG = AMDGPU_HW_IP_VCN_JPEG,
+ AMDGPU_RING_TYPE_VPE = AMDGPU_HW_IP_VPE,
AMDGPU_RING_TYPE_KIQ,
- AMDGPU_RING_TYPE_UVD_ENC
+ AMDGPU_RING_TYPE_MES,
+ AMDGPU_RING_TYPE_UMSCH_MM,
+ AMDGPU_RING_TYPE_CPER,
};
-struct amdgpu_device;
-struct amdgpu_ring;
-struct amdgpu_ib;
-struct amdgpu_cs_parser;
+enum amdgpu_ib_pool_type {
+ /* Normal submissions to the top of the pipeline. */
+ AMDGPU_IB_POOL_DELAYED,
+ /* Immediate submissions to the bottom of the pipeline. */
+ AMDGPU_IB_POOL_IMMEDIATE,
+ /* Direct submission to the ring buffer during init and reset. */
+ AMDGPU_IB_POOL_DIRECT,
+
+ AMDGPU_IB_POOL_MAX
+};
+
+struct amdgpu_ib {
+ struct drm_suballoc *sa_bo;
+ uint32_t length_dw;
+ uint64_t gpu_addr;
+ uint32_t *ptr;
+ uint32_t flags;
+};
+
+struct amdgpu_sched {
+ u32 num_scheds;
+ struct drm_gpu_scheduler *sched[AMDGPU_MAX_HWIP_RINGS];
+};
/*
* Fences.
*/
struct amdgpu_fence_driver {
uint64_t gpu_addr;
- volatile uint32_t *cpu_addr;
+ uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
uint32_t sync_seq;
atomic_t last_seq;
+ u64 signalled_wptr;
bool initialized;
struct amdgpu_irq_src *irq_src;
unsigned irq_type;
@@ -73,54 +128,127 @@ struct amdgpu_fence_driver {
struct dma_fence **fences;
};
-int amdgpu_fence_driver_init(struct amdgpu_device *adev);
-void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
-void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
+/*
+ * Fences mark an event in the GPU's pipeline and are used
+ * for GPU/CPU synchronization. When the fence is written,
+ * it is expected that all buffers associated with that fence
+ * are no longer in use by the associated ring on the GPU and
+ * that the relevant GPU caches have been flushed.
+ */
+
+struct amdgpu_fence {
+ struct dma_fence base;
+
+ /* RB, DMA, etc. */
+ struct amdgpu_ring *ring;
+ ktime_t start_timestamp;
+
+ /* wptr for the fence for resets */
+ u64 wptr;
+ /* fence context for resets */
+ u64 context;
+ uint32_t seq;
+};
+
+extern const struct drm_sched_backend_ops amdgpu_sched_ops;
+
+void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error);
+void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence);
+void amdgpu_fence_save_wptr(struct dma_fence *fence);
-int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
- unsigned num_hw_submission);
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
unsigned irq_type);
-void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
-void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence);
-void amdgpu_fence_process(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
+void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
+int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
+void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+ struct amdgpu_fence *af, unsigned int flags);
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
+ uint32_t timeout);
+bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
+signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
+ uint32_t wait_seq,
+ signed long timeout);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
+void amdgpu_fence_driver_isr_toggle(struct amdgpu_device *adev, bool stop);
+
+u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring);
+void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq,
+ ktime_t timestamp);
+
/*
* Rings.
*/
/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
+ /**
+ * @type:
+ *
+ * GFX, Compute, SDMA, UVD, VCE, VCN, VPE, KIQ, MES, UMSCH, and CPER
+ * use ring buffers. The type field just identifies which component the
+ * ring buffer is associated with.
+ */
enum amdgpu_ring_type type;
uint32_t align_mask;
+
+ /**
+ * @nop:
+ *
+ * Every amdgpu IP block has its own no-op instruction (e.g., GFX 10
+ * uses PACKET3(PACKET3_NOP, 0x3FFF), VCN 5 uses VCN_ENC_CMD_NO_OP,
+ * etc.). This field holds the specific no-op used by the component
+ * that initializes the ring.
+ */
u32 nop;
bool support_64bit_ptrs;
- unsigned vmhub;
+ bool no_user_fence;
+ bool secure_submission_supported;
+
+ /**
+ * @extra_bytes:
+ *
+ * Optional extra space in bytes that is added to the ring size
+ * when allocating the BO that holds the contents of the ring.
+ * This space isn't used for command submission to the ring,
+ * but is just there to satisfy some hardware requirements or
+ * implement workarounds. It's up to the implementation of each
+ * specific ring to initialize this space.
+ */
+ unsigned extra_bytes;
/* ring read/write ptr handling */
u64 (*get_rptr)(struct amdgpu_ring *ring);
u64 (*get_wptr)(struct amdgpu_ring *ring);
void (*set_wptr)(struct amdgpu_ring *ring);
/* validating and patching of IBs */
- int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+ int (*parse_cs)(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
+ int (*patch_cs_in_place)(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
/* constants to calculate how many DW are needed for an emit */
unsigned emit_frame_size;
unsigned emit_ib_size;
/* command emit functions */
void (*emit_ib)(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch);
+ uint32_t flags);
void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
uint64_t seq, unsigned flags);
void (*emit_pipeline_sync)(struct amdgpu_ring *ring);
- void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
+ void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr);
void (*emit_hdp_flush)(struct amdgpu_ring *ring);
- void (*emit_hdp_invalidate)(struct amdgpu_ring *ring);
void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
uint32_t gds_base, uint32_t gds_size,
uint32_t gws_base, uint32_t gws_size,
@@ -130,76 +258,324 @@ struct amdgpu_ring_funcs {
int (*test_ib)(struct amdgpu_ring *ring, long timeout);
/* insert NOP packets */
void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
+ void (*insert_start)(struct amdgpu_ring *ring);
void (*insert_end)(struct amdgpu_ring *ring);
/* pad the indirect buffer to the necessary number of dw */
void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
- unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
- void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
+ unsigned (*init_cond_exec)(struct amdgpu_ring *ring, uint64_t addr);
/* note usage for clock and power gating */
void (*begin_use)(struct amdgpu_ring *ring);
void (*end_use)(struct amdgpu_ring *ring);
void (*emit_switch_buffer) (struct amdgpu_ring *ring);
void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
- void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
+ void (*emit_gfx_shadow)(struct amdgpu_ring *ring, u64 shadow_va, u64 csa_va,
+ u64 gds_va, bool init_shadow, int vmid);
+ void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
+ void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask);
+ void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
+ uint32_t reg0, uint32_t reg1,
+ uint32_t ref, uint32_t mask);
+ void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
+ bool secure);
+ /* Try to soft recover the ring to make the fence signal */
+ void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
+ int (*preempt_ib)(struct amdgpu_ring *ring);
+ void (*emit_mem_sync)(struct amdgpu_ring *ring);
+ void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable);
+ void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset);
+ void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset);
+ void (*patch_de)(struct amdgpu_ring *ring, unsigned offset);
+ int (*reset)(struct amdgpu_ring *ring, unsigned int vmid,
+ struct amdgpu_fence *timedout_fence);
+ void (*emit_cleaner_shader)(struct amdgpu_ring *ring);
};
+/**
+ * struct amdgpu_ring - Holds ring information
+ */
struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
- struct amd_gpu_scheduler sched;
+ struct drm_gpu_scheduler sched;
struct amdgpu_bo *ring_obj;
- volatile uint32_t *ring;
+ uint32_t *ring;
+ /* backups for resets */
+ uint32_t *ring_backup;
+ unsigned int ring_backup_entries_to_copy;
unsigned rptr_offs;
+ u64 rptr_gpu_addr;
+ u32 *rptr_cpu_addr;
+
+ /**
+ * @wptr:
+ *
+ * This is part of the ring buffer implementation and represents the
+ * write pointer. The wptr marks how far the host has written commands.
+ */
u64 wptr;
+
+ /**
+ * @wptr_old:
+ *
+ * Before wptr is updated with a new value, the previous value is
+ * stored in wptr_old.
+ */
u64 wptr_old;
unsigned ring_size;
+
+ /**
+ * @max_dw:
+ *
+ * Maximum number of DWords for ring allocation. This information is
+ * provided at the ring initialization time, and each IP block can
+ * specify a specific value. Check places that invoke
+ * amdgpu_ring_init() to see the maximum size per block.
+ */
unsigned max_dw;
+
+ /**
+ * @count_dw:
+ *
+ * This value starts at the maximum number of DWords supported by the
+ * ring and is updated as the ring is written.
+ */
int count_dw;
uint64_t gpu_addr;
+
+ /**
+ * @ptr_mask:
+ *
+ * Some IPs support 64-bit pointers and others only 32-bit; this
+ * behavior is component-specific and described by the field
+ * support_64bit_ptrs. If the IP block supports 64 bits, the mask is
+ * 0xffffffffffffffff; otherwise, this value falls back to buf_mask.
+ * Note that this field is used to keep wptr within a valid range.
+ */
uint64_t ptr_mask;
+
+ /**
+ * @buf_mask:
+ *
+ * Buffer mask is used to keep the wptr within the ring buffer
+ * bounds. It is set during ring buffer initialization and is
+ * defined as (ring_size / 4) - 1.
+ */
uint32_t buf_mask;
- bool ready;
u32 idx;
+ u32 xcc_id;
+ u32 xcp_id;
u32 me;
u32 pipe;
u32 queue;
struct amdgpu_bo *mqd_obj;
uint64_t mqd_gpu_addr;
void *mqd_ptr;
+ unsigned mqd_size;
uint64_t eop_gpu_addr;
u32 doorbell_index;
bool use_doorbell;
+ bool use_pollmem;
unsigned wptr_offs;
+ u64 wptr_gpu_addr;
+
+ /**
+ * @wptr_cpu_addr:
+ *
+ * CPU pointer to the wptr writeback slot; it is used to commit the
+ * new write pointer to the GPU.
+ */
+ u32 *wptr_cpu_addr;
unsigned fence_offs;
+ u64 fence_gpu_addr;
+ u32 *fence_cpu_addr;
uint64_t current_ctx;
char name[16];
+ u32 trail_seq;
+ unsigned trail_fence_offs;
+ u64 trail_fence_gpu_addr;
+ u32 *trail_fence_cpu_addr;
unsigned cond_exe_offs;
u64 cond_exe_gpu_addr;
- volatile u32 *cond_exe_cpu_addr;
+ u32 *cond_exe_cpu_addr;
+ unsigned int set_q_mode_offs;
+ u32 *set_q_mode_ptr;
+ u64 set_q_mode_token;
+ unsigned vm_hub;
unsigned vm_inv_eng;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *ent;
-#endif
+ struct dma_fence *vmid_wait;
+ bool has_compute_vm_bug;
+ bool no_scheduler;
+ bool no_user_submission;
+ int hw_prio;
+ unsigned num_hw_submission;
+ atomic_t *sched_score;
+
+ bool is_sw_ring;
+ unsigned int entry_index;
+ /* store the cached rptr to restore after reset */
+ uint64_t cached_rptr;
};
+#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
+#define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
+#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
+#define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0)
+#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
+#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
+#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
+#define amdgpu_ring_emit_ib(r, job, ib, flags) ((r)->funcs->emit_ib((r), (job), (ib), (flags)))
+#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
+#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
+#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
+#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
+#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
+#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
+#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
+#define amdgpu_ring_emit_gfx_shadow(r, s, c, g, i, v) ((r)->funcs->emit_gfx_shadow((r), (s), (c), (g), (i), (v)))
+#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
+#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
+#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
+#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
+#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
+#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
+#define amdgpu_ring_init_cond_exec(r, a) (r)->funcs->init_cond_exec((r), (a))
+#define amdgpu_ring_preempt_ib(r) (r)->funcs->preempt_ib(r)
+#define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o)))
+#define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o)))
+#define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o)))
+#define amdgpu_ring_reset(r, v, f) (r)->funcs->reset((r), (v), (f))
+
+unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_ib_begin(struct amdgpu_ring *ring);
+void amdgpu_ring_ib_end(struct amdgpu_ring *ring);
+void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring);
+void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring);
+void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring);
+
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned ring_size, struct amdgpu_irq_src *irq_src,
- unsigned irq_type);
+ unsigned int max_dw, struct amdgpu_irq_src *irq_src,
+ unsigned int irq_type, unsigned int hw_prio,
+ atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
+void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
+ uint32_t reg0, uint32_t val0,
+ uint32_t reg1, uint32_t val1);
+bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
+ struct dma_fence *fence);
+
+static inline void amdgpu_ring_set_preempt_cond_exec(struct amdgpu_ring *ring,
+ bool cond_exec)
+{
+ *ring->cond_exe_cpu_addr = cond_exec;
+}
+
static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
{
- int i = 0;
- while (i <= ring->buf_mask)
- ring->ring[i++] = ring->funcs->nop;
+ memset32(ring->ring, ring->funcs->nop, ring->buf_mask + 1);
+}
+
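+/*
+ * Illustrative usage from an IP block (a sketch, not taken from a specific
+ * ring implementation): reserve space, write the packet dwords, then commit.
+ *
+ *   r = amdgpu_ring_alloc(ring, ndw);
+ *   if (r)
+ *           return r;
+ *   amdgpu_ring_write(ring, ring->funcs->nop);
+ *   ...
+ *   amdgpu_ring_commit(ring);
+ */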
+static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
+{
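+ /* Store one dword at the masked wptr, then advance wptr and account for it. */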
+ ring->ring[ring->wptr++ & ring->buf_mask] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+}
+
+static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
+ void *src, int count_dw)
+{
+ unsigned occupied, chunk1, chunk2;
+
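+ /*
+ * The copy may wrap past the end of the ring buffer, so it is split in
+ * two: chunk1 runs up to the wrap point and chunk2 continues from the
+ * start of the buffer. Sizes are converted from dwords to bytes before
+ * the memcpy.
+ */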
+ occupied = ring->wptr & ring->buf_mask;
+ chunk1 = ring->buf_mask + 1 - occupied;
+ chunk1 = (chunk1 >= count_dw) ? count_dw : chunk1;
+ chunk2 = count_dw - chunk1;
+ chunk1 <<= 2;
+ chunk2 <<= 2;
+
+ if (chunk1)
+ memcpy(&ring->ring[occupied], src, chunk1);
+ if (chunk2) {
+ src += chunk1;
+ memcpy(ring->ring, src, chunk2);
+ }
+
+ ring->wptr += count_dw;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw -= count_dw;
+}
+
+/**
+ * amdgpu_ring_patch_cond_exec - patch dw count of conditional execute
+ * @ring: amdgpu_ring structure
+ * @offset: offset returned by amdgpu_ring_init_cond_exec
+ *
+ * Calculate the dw count and patch it into a cond_exec command.
+ */
+static inline void amdgpu_ring_patch_cond_exec(struct amdgpu_ring *ring,
+ unsigned int offset)
+{
+ unsigned cur;
+
+ if (!ring->funcs->init_cond_exec)
+ return;
+
+ WARN_ON(offset > ring->buf_mask);
+ WARN_ON(ring->ring[offset] != 0);
+
+ cur = (ring->wptr - 1) & ring->buf_mask;
+ if (cur < offset)
+ cur += ring->ring_size >> 2;
+ ring->ring[offset] = cur - offset;
+}
+
+int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
+
+void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+
+int amdgpu_ring_init_mqd(struct amdgpu_ring *ring);
+
+static inline u32 amdgpu_ib_get_value(struct amdgpu_ib *ib, int idx)
+{
+ return ib->ptr[idx];
+}
+
+static inline void amdgpu_ib_set_value(struct amdgpu_ib *ib, int idx,
+ uint32_t value)
+{
+ ib->ptr[idx] = value;
}
+int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ unsigned size,
+ enum amdgpu_ib_pool_type pool,
+ struct amdgpu_ib *ib);
+void amdgpu_ib_free(struct amdgpu_ib *ib, struct dma_fence *f);
+int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
+ struct amdgpu_ib *ibs, struct amdgpu_job *job,
+ struct dma_fence **f);
+int amdgpu_ib_pool_init(struct amdgpu_device *adev);
+void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
+int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
+bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring);
+void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring,
+ struct amdgpu_fence *guilty_fence);
+bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring,
+ u32 reset_type);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
new file mode 100644
index 000000000000..7e7d6c3865bc
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.c
@@ -0,0 +1,576 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/slab.h>
+#include <drm/drm_print.h>
+
+#include "amdgpu_ring_mux.h"
+#include "amdgpu_ring.h"
+#include "amdgpu.h"
+
+#define AMDGPU_MUX_RESUBMIT_JIFFIES_TIMEOUT (HZ / 2)
+#define AMDGPU_MAX_LAST_UNSIGNALED_THRESHOLD_US 10000
+
+static const struct ring_info {
+ unsigned int hw_pio;
+ const char *ring_name;
+} sw_ring_info[] = {
+ { AMDGPU_RING_PRIO_DEFAULT, "gfx_low"},
+ { AMDGPU_RING_PRIO_2, "gfx_high"},
+};
+
+static struct kmem_cache *amdgpu_mux_chunk_slab;
+
+static inline struct amdgpu_mux_entry *amdgpu_ring_mux_sw_entry(struct amdgpu_ring_mux *mux,
+ struct amdgpu_ring *ring)
+{
+ return ring->entry_index < mux->ring_entry_size ?
+ &mux->ring_entry[ring->entry_index] : NULL;
+}
+
+/* copy packets from the sw ring range [begin, end) */
+static void amdgpu_ring_mux_copy_pkt_from_sw_ring(struct amdgpu_ring_mux *mux,
+ struct amdgpu_ring *ring,
+ u64 s_start, u64 s_end)
+{
+ u64 start, end;
+ struct amdgpu_ring *real_ring = mux->real_ring;
+
+ start = s_start & ring->buf_mask;
+ end = s_end & ring->buf_mask;
+
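+ /*
+ * A wrapped range (start > end) is copied in two pieces: from start to
+ * the end of the software ring, then from the beginning of the software
+ * ring up to end.
+ */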
+ if (start == end) {
+ DRM_ERROR("no more data copied from sw ring\n");
+ return;
+ }
+ if (start > end) {
+ amdgpu_ring_alloc(real_ring, (ring->ring_size >> 2) + end - start);
+ amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start],
+ (ring->ring_size >> 2) - start);
+ amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[0], end);
+ } else {
+ amdgpu_ring_alloc(real_ring, end - start);
+ amdgpu_ring_write_multiple(real_ring, (void *)&ring->ring[start], end - start);
+ }
+}
+
+static void amdgpu_mux_resubmit_chunks(struct amdgpu_ring_mux *mux)
+{
+ struct amdgpu_mux_entry *e = NULL;
+ struct amdgpu_mux_chunk *chunk;
+ uint32_t seq, last_seq;
+ int i;
+
+ /* find the low priority entry */
+ if (!mux->s_resubmit)
+ return;
+
+ for (i = 0; i < mux->num_ring_entries; i++) {
+ if (mux->ring_entry[i].ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
+ e = &mux->ring_entry[i];
+ break;
+ }
+ }
+
+ if (!e) {
+ DRM_ERROR("%s no low priority ring found\n", __func__);
+ return;
+ }
+
+ last_seq = atomic_read(&e->ring->fence_drv.last_seq);
+ seq = mux->seqno_to_resubmit;
+ if (last_seq < seq) {
+ /* resubmit all the fences between (last_seq, seq] */
+ list_for_each_entry(chunk, &e->list, entry) {
+ if (chunk->sync_seq > last_seq && chunk->sync_seq <= seq) {
+ amdgpu_fence_update_start_timestamp(e->ring,
+ chunk->sync_seq,
+ ktime_get());
+ if (chunk->sync_seq ==
+ le32_to_cpu(*(e->ring->fence_drv.cpu_addr + 2))) {
+ if (chunk->cntl_offset <= e->ring->buf_mask)
+ amdgpu_ring_patch_cntl(e->ring,
+ chunk->cntl_offset);
+ if (chunk->ce_offset <= e->ring->buf_mask)
+ amdgpu_ring_patch_ce(e->ring, chunk->ce_offset);
+ if (chunk->de_offset <= e->ring->buf_mask)
+ amdgpu_ring_patch_de(e->ring, chunk->de_offset);
+ }
+ amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, e->ring,
+ chunk->start,
+ chunk->end);
+ mux->wptr_resubmit = chunk->end;
+ amdgpu_ring_commit(mux->real_ring);
+ }
+ }
+ }
+
+ timer_delete(&mux->resubmit_timer);
+ mux->s_resubmit = false;
+}
+
+static void amdgpu_ring_mux_schedule_resubmit(struct amdgpu_ring_mux *mux)
+{
+ mod_timer(&mux->resubmit_timer, jiffies + AMDGPU_MUX_RESUBMIT_JIFFIES_TIMEOUT);
+}
+
+static void amdgpu_mux_resubmit_fallback(struct timer_list *t)
+{
+ struct amdgpu_ring_mux *mux = timer_container_of(mux, t,
+ resubmit_timer);
+
+ if (!spin_trylock(&mux->lock)) {
+ amdgpu_ring_mux_schedule_resubmit(mux);
+ DRM_ERROR("reschedule resubmit\n");
+ return;
+ }
+ amdgpu_mux_resubmit_chunks(mux);
+ spin_unlock(&mux->lock);
+}
+
+int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
+ unsigned int entry_size)
+{
+ mux->real_ring = ring;
+ mux->num_ring_entries = 0;
+
+ mux->ring_entry = kcalloc(entry_size, sizeof(struct amdgpu_mux_entry), GFP_KERNEL);
+ if (!mux->ring_entry)
+ return -ENOMEM;
+
+ mux->ring_entry_size = entry_size;
+ mux->s_resubmit = false;
+
+ amdgpu_mux_chunk_slab = KMEM_CACHE(amdgpu_mux_chunk, SLAB_HWCACHE_ALIGN);
+ if (!amdgpu_mux_chunk_slab) {
+ DRM_ERROR("create amdgpu_mux_chunk cache failed\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&mux->lock);
+ timer_setup(&mux->resubmit_timer, amdgpu_mux_resubmit_fallback, 0);
+
+ return 0;
+}
+
+void amdgpu_ring_mux_fini(struct amdgpu_ring_mux *mux)
+{
+ struct amdgpu_mux_entry *e;
+ struct amdgpu_mux_chunk *chunk, *chunk2;
+ int i;
+
+ for (i = 0; i < mux->num_ring_entries; i++) {
+ e = &mux->ring_entry[i];
+ list_for_each_entry_safe(chunk, chunk2, &e->list, entry) {
+ list_del(&chunk->entry);
+ kmem_cache_free(amdgpu_mux_chunk_slab, chunk);
+ }
+ }
+ kmem_cache_destroy(amdgpu_mux_chunk_slab);
+ kfree(mux->ring_entry);
+ mux->ring_entry = NULL;
+ mux->num_ring_entries = 0;
+ mux->ring_entry_size = 0;
+}
+
+int amdgpu_ring_mux_add_sw_ring(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
+{
+ struct amdgpu_mux_entry *e;
+
+ if (mux->num_ring_entries >= mux->ring_entry_size) {
+ DRM_ERROR("add sw ring exceeding max entry size\n");
+ return -ENOENT;
+ }
+
+ e = &mux->ring_entry[mux->num_ring_entries];
+ ring->entry_index = mux->num_ring_entries;
+ e->ring = ring;
+
+ INIT_LIST_HEAD(&e->list);
+ mux->num_ring_entries += 1;
+ return 0;
+}
+
+void amdgpu_ring_mux_set_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, u64 wptr)
+{
+ struct amdgpu_mux_entry *e;
+
+ spin_lock(&mux->lock);
+
+ if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT)
+ amdgpu_mux_resubmit_chunks(mux);
+
+ e = amdgpu_ring_mux_sw_entry(mux, ring);
+ if (!e) {
+ DRM_ERROR("cannot find entry for sw ring\n");
+ spin_unlock(&mux->lock);
+ return;
+ }
+
+ /* Skip this wptr update while a preemption is in progress. */
+ if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && mux->pending_trailing_fence_signaled) {
+ spin_unlock(&mux->lock);
+ return;
+ }
+
+ e->sw_cptr = e->sw_wptr;
+ /* Update cptr if the packets were already copied by the resubmit path */
+ if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT && e->sw_cptr < mux->wptr_resubmit)
+ e->sw_cptr = mux->wptr_resubmit;
+ e->sw_wptr = wptr;
+ e->start_ptr_in_hw_ring = mux->real_ring->wptr;
+
+ /* Skip copying packets that have already been resubmitted. */
+ if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT || mux->wptr_resubmit < wptr) {
+ amdgpu_ring_mux_copy_pkt_from_sw_ring(mux, ring, e->sw_cptr, wptr);
+ e->end_ptr_in_hw_ring = mux->real_ring->wptr;
+ amdgpu_ring_commit(mux->real_ring);
+ } else {
+ e->end_ptr_in_hw_ring = mux->real_ring->wptr;
+ }
+ spin_unlock(&mux->lock);
+}
+
+u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
+{
+ struct amdgpu_mux_entry *e;
+
+ e = amdgpu_ring_mux_sw_entry(mux, ring);
+ if (!e) {
+ DRM_ERROR("cannot find entry for sw ring\n");
+ return 0;
+ }
+
+ return e->sw_wptr;
+}
+
+/**
+ * amdgpu_ring_mux_get_rptr - get the readptr of the software ring
+ * @mux: the multiplexer the software rings attach to
+ * @ring: the software ring of which we calculate the readptr
+ *
+ * The returned read pointer is not precise while other rings could be
+ * writing data onto the real ring buffer. After the real ring has been
+ * overwritten, we cannot decide whether our packets have been executed or
+ * not even read yet. However, this function is only called by tools such
+ * as umr to collect the latest packets for hang analysis. We assume the
+ * hang happened near our latest submission, so the following logic gives
+ * the best clue:
+ * If the readptr is between start and end, then we return the copy pointer
+ * plus the distance from start to readptr. If the readptr is before start, we
+ * return the copy pointer. Lastly, if the readptr is past end, we return the
+ * write pointer.
+ */
+u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
+{
+ struct amdgpu_mux_entry *e;
+ u64 readp, offset, start, end;
+
+ e = amdgpu_ring_mux_sw_entry(mux, ring);
+ if (!e) {
+ DRM_ERROR("no sw entry found!\n");
+ return 0;
+ }
+
+ readp = amdgpu_ring_get_rptr(mux->real_ring);
+
+ start = e->start_ptr_in_hw_ring & mux->real_ring->buf_mask;
+ end = e->end_ptr_in_hw_ring & mux->real_ring->buf_mask;
+ if (start > end) {
+ if (readp <= end)
+ readp += mux->real_ring->ring_size >> 2;
+ end += mux->real_ring->ring_size >> 2;
+ }
+
+ if (start <= readp && readp <= end) {
+ offset = readp - start;
+ e->sw_rptr = (e->sw_cptr + offset) & ring->buf_mask;
+ } else if (readp < start) {
+ e->sw_rptr = e->sw_cptr;
+ } else {
+ /* end < readptr */
+ e->sw_rptr = e->sw_wptr;
+ }
+
+ return e->sw_rptr;
+}
+
+u64 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
+
+ WARN_ON(!ring->is_sw_ring);
+ return amdgpu_ring_mux_get_rptr(mux, ring);
+}
+
+u64 amdgpu_sw_ring_get_wptr_gfx(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
+
+ WARN_ON(!ring->is_sw_ring);
+ return amdgpu_ring_mux_get_wptr(mux, ring);
+}
+
+void amdgpu_sw_ring_set_wptr_gfx(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
+
+ WARN_ON(!ring->is_sw_ring);
+ amdgpu_ring_mux_set_wptr(mux, ring, ring->wptr);
+}
+
+/* Override insert_nop to prevent emitting nops to the software rings */
+void amdgpu_sw_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ WARN_ON(!ring->is_sw_ring);
+}
+
+const char *amdgpu_sw_ring_name(int idx)
+{
+ return idx < ARRAY_SIZE(sw_ring_info) ?
+ sw_ring_info[idx].ring_name : NULL;
+}
+
+unsigned int amdgpu_sw_ring_priority(int idx)
+{
+ return idx < ARRAY_SIZE(sw_ring_info) ?
+ sw_ring_info[idx].hw_pio : AMDGPU_RING_PRIO_DEFAULT;
+}
+
+/* Scan for low priority rings with unsignaled fences while the high priority rings have none. */
+static int amdgpu_mcbp_scan(struct amdgpu_ring_mux *mux)
+{
+ struct amdgpu_ring *ring;
+ int i, need_preempt;
+
+ need_preempt = 0;
+ for (i = 0; i < mux->num_ring_entries; i++) {
+ ring = mux->ring_entry[i].ring;
+ if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT &&
+ amdgpu_fence_count_emitted(ring) > 0)
+ return 0;
+ if (ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT &&
+ amdgpu_fence_last_unsignaled_time_us(ring) >
+ AMDGPU_MAX_LAST_UNSIGNALED_THRESHOLD_US)
+ need_preempt = 1;
+ }
+ return need_preempt && !mux->s_resubmit;
+}
+
+/* Trigger Mid-Command Buffer Preemption (MCBP) and find if we need to resubmit. */
+static int amdgpu_mcbp_trigger_preempt(struct amdgpu_ring_mux *mux)
+{
+ int r;
+
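+ /*
+ * Mark the trailing fence as pending before asking the real ring to
+ * preempt, so the trailing fence interrupt handler knows a resubmit of
+ * the preempted low priority packets may be required.
+ */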
+ spin_lock(&mux->lock);
+ mux->pending_trailing_fence_signaled = true;
+ r = amdgpu_ring_preempt_ib(mux->real_ring);
+ spin_unlock(&mux->lock);
+ return r;
+}
+
+void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
+
+ WARN_ON(!ring->is_sw_ring);
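+ /*
+ * For a high priority software ring with MCBP enabled, check whether the
+ * low priority ring should be preempted instead of starting a new chunk.
+ */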
+ if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT) {
+ if (amdgpu_mcbp_scan(mux) > 0)
+ amdgpu_mcbp_trigger_preempt(mux);
+ return;
+ }
+
+ amdgpu_ring_mux_start_ib(mux, ring);
+}
+
+void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
+
+ WARN_ON(!ring->is_sw_ring);
+ if (adev->gfx.mcbp && ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
+ return;
+ amdgpu_ring_mux_end_ib(mux, ring);
+}
+
+void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ring_mux *mux = &adev->gfx.muxer;
+ unsigned offset;
+
+ if (ring->hw_prio > AMDGPU_RING_PRIO_DEFAULT)
+ return;
+
+ offset = ring->wptr & ring->buf_mask;
+
+ amdgpu_ring_mux_ib_mark_offset(mux, ring, offset, type);
+}
+
+void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
+{
+ struct amdgpu_mux_entry *e;
+ struct amdgpu_mux_chunk *chunk;
+
+ spin_lock(&mux->lock);
+ amdgpu_mux_resubmit_chunks(mux);
+ spin_unlock(&mux->lock);
+
+ e = amdgpu_ring_mux_sw_entry(mux, ring);
+ if (!e) {
+ DRM_ERROR("cannot find entry!\n");
+ return;
+ }
+
+ chunk = kmem_cache_alloc(amdgpu_mux_chunk_slab, GFP_KERNEL);
+ if (!chunk) {
+ DRM_ERROR("alloc amdgpu_mux_chunk_slab failed\n");
+ return;
+ }
+
+ chunk->start = ring->wptr;
+ /* initialize the offsets to an out-of-range value so we can tell whether the IB submission set them */
+ chunk->cntl_offset = ring->buf_mask + 1;
+ chunk->de_offset = ring->buf_mask + 1;
+ chunk->ce_offset = ring->buf_mask + 1;
+ list_add_tail(&chunk->entry, &e->list);
+}
+
+static void scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
+{
+ uint32_t last_seq = 0;
+ struct amdgpu_mux_entry *e;
+ struct amdgpu_mux_chunk *chunk, *tmp;
+
+ e = amdgpu_ring_mux_sw_entry(mux, ring);
+ if (!e) {
+ DRM_ERROR("cannot find entry!\n");
+ return;
+ }
+
+ last_seq = atomic_read(&ring->fence_drv.last_seq);
+
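+ /* Drop chunks whose fences have already signaled; they never need to be resubmitted. */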
+ list_for_each_entry_safe(chunk, tmp, &e->list, entry) {
+ if (chunk->sync_seq <= last_seq) {
+ list_del(&chunk->entry);
+ kmem_cache_free(amdgpu_mux_chunk_slab, chunk);
+ }
+ }
+}
+
+void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux,
+ struct amdgpu_ring *ring, u64 offset,
+ enum amdgpu_ring_mux_offset_type type)
+{
+ struct amdgpu_mux_entry *e;
+ struct amdgpu_mux_chunk *chunk;
+
+ e = amdgpu_ring_mux_sw_entry(mux, ring);
+ if (!e) {
+ DRM_ERROR("cannot find entry!\n");
+ return;
+ }
+
+ chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);
+ if (!chunk) {
+ DRM_ERROR("cannot find chunk!\n");
+ return;
+ }
+
+ switch (type) {
+ case AMDGPU_MUX_OFFSET_TYPE_CONTROL:
+ chunk->cntl_offset = offset;
+ break;
+ case AMDGPU_MUX_OFFSET_TYPE_DE:
+ chunk->de_offset = offset;
+ break;
+ case AMDGPU_MUX_OFFSET_TYPE_CE:
+ chunk->ce_offset = offset;
+ break;
+ default:
+ DRM_ERROR("invalid type (%d)\n", type);
+ break;
+ }
+}
+
+void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring)
+{
+ struct amdgpu_mux_entry *e;
+ struct amdgpu_mux_chunk *chunk;
+
+ e = amdgpu_ring_mux_sw_entry(mux, ring);
+ if (!e) {
+ DRM_ERROR("cannot find entry!\n");
+ return;
+ }
+
+ chunk = list_last_entry(&e->list, struct amdgpu_mux_chunk, entry);
+ if (!chunk) {
+ DRM_ERROR("cannot find chunk!\n");
+ return;
+ }
+
+ chunk->end = ring->wptr;
+ chunk->sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
+
+ scan_and_remove_signaled_chunk(mux, ring);
+}
+
+bool amdgpu_mcbp_handle_trailing_fence_irq(struct amdgpu_ring_mux *mux)
+{
+ struct amdgpu_mux_entry *e;
+ struct amdgpu_ring *ring = NULL;
+ int i;
+
+ if (!mux->pending_trailing_fence_signaled)
+ return false;
+
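+ /*
+ * The preemption has completed only once the trailing fence value
+ * written by the CP matches the sequence number that was emitted.
+ */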
+ if (mux->real_ring->trail_seq != le32_to_cpu(*mux->real_ring->trail_fence_cpu_addr))
+ return false;
+
+ for (i = 0; i < mux->num_ring_entries; i++) {
+ e = &mux->ring_entry[i];
+ if (e->ring->hw_prio <= AMDGPU_RING_PRIO_DEFAULT) {
+ ring = e->ring;
+ break;
+ }
+ }
+
+ if (!ring) {
+ DRM_ERROR("cannot find low priority ring\n");
+ return false;
+ }
+
+ amdgpu_fence_process(ring);
+ if (amdgpu_fence_count_emitted(ring) > 0) {
+ mux->s_resubmit = true;
+ mux->seqno_to_resubmit = ring->fence_drv.sync_seq;
+ amdgpu_ring_mux_schedule_resubmit(mux);
+ }
+
+ mux->pending_trailing_fence_signaled = false;
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h
new file mode 100644
index 000000000000..d3186b570b82
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring_mux.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RING_MUX__
+#define __AMDGPU_RING_MUX__
+
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include "amdgpu_ring.h"
+
+struct amdgpu_ring;
+
+/**
+ * struct amdgpu_mux_entry - entry recording the copy state of a software ring.
+ * @ring: the pointer to the software ring.
+ * @start_ptr_in_hw_ring: last start location copied to in the hardware ring.
+ * @end_ptr_in_hw_ring: last end location copied to in the hardware ring.
+ * @sw_cptr: the position of the copy pointer in the sw ring.
+ * @sw_rptr: the read pointer in software ring.
+ * @sw_wptr: the write pointer in software ring.
+ * @list: list head for amdgpu_mux_chunk
+ */
+struct amdgpu_mux_entry {
+ struct amdgpu_ring *ring;
+ u64 start_ptr_in_hw_ring;
+ u64 end_ptr_in_hw_ring;
+ u64 sw_cptr;
+ u64 sw_rptr;
+ u64 sw_wptr;
+ struct list_head list;
+};
+
+enum amdgpu_ring_mux_offset_type {
+ AMDGPU_MUX_OFFSET_TYPE_CONTROL,
+ AMDGPU_MUX_OFFSET_TYPE_DE,
+ AMDGPU_MUX_OFFSET_TYPE_CE,
+};
+
+enum ib_complete_status {
+ /* IB not started/reset value, default value. */
+ IB_COMPLETION_STATUS_DEFAULT = 0,
+ /* IB preempted, started but not completed. */
+ IB_COMPLETION_STATUS_PREEMPTED = 1,
+ /* IB completed. */
+ IB_COMPLETION_STATUS_COMPLETED = 2,
+};
+
+struct amdgpu_ring_mux {
+ struct amdgpu_ring *real_ring;
+
+ struct amdgpu_mux_entry *ring_entry;
+ unsigned int num_ring_entries;
+ unsigned int ring_entry_size;
+ /* lock protecting packet copies from the different software rings */
+ spinlock_t lock;
+ bool s_resubmit;
+ uint32_t seqno_to_resubmit;
+ u64 wptr_resubmit;
+ struct timer_list resubmit_timer;
+
+ bool pending_trailing_fence_signaled;
+};
+
+/**
+ * struct amdgpu_mux_chunk - saves the location of an indirect buffer's packets on the software rings.
+ * @entry: the list entry.
+ * @sync_seq: the fence seqno related to the saved IB.
+ * @start: start location on the software ring.
+ * @end: end location on the software ring.
+ * @cntl_offset: the PRE_RESUME bit position used for resubmission.
+ * @de_offset: the anchor in write_data for the DE meta of resubmission.
+ * @ce_offset: the anchor in write_data for the CE meta of resubmission.
+ */
+struct amdgpu_mux_chunk {
+ struct list_head entry;
+ uint32_t sync_seq;
+ u64 start;
+ u64 end;
+ u64 cntl_offset;
+ u64 de_offset;
+ u64 ce_offset;
+};
+
+int amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
+ unsigned int entry_size);
+void amdgpu_ring_mux_fini(struct amdgpu_ring_mux *mux);
+int amdgpu_ring_mux_add_sw_ring(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring);
+void amdgpu_ring_mux_set_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, u64 wptr);
+u64 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring);
+u64 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring);
+void amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring);
+void amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring);
+void amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring,
+ u64 offset, enum amdgpu_ring_mux_offset_type type);
+bool amdgpu_mcbp_handle_trailing_fence_irq(struct amdgpu_ring_mux *mux);
+
+u64 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring);
+u64 amdgpu_sw_ring_get_wptr_gfx(struct amdgpu_ring *ring);
+void amdgpu_sw_ring_set_wptr_gfx(struct amdgpu_ring *ring);
+void amdgpu_sw_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
+void amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring);
+void amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring);
+void amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type);
+const char *amdgpu_sw_ring_name(int idx);
+unsigned int amdgpu_sw_ring_priority(int idx);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
new file mode 100644
index 000000000000..5aa830a02d80
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -0,0 +1,550 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_gfx.h"
+#include "amdgpu_rlc.h"
+
+/**
+ * amdgpu_gfx_rlc_enter_safe_mode - Set RLC into safe mode
+ *
+ * @adev: amdgpu_device pointer
+ * @xcc_id: xcc accelerated compute core id
+ *
+ * Put the RLC into safe mode if it is enabled and not already in safe mode.
+ */
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id)
+{
+ if (adev->gfx.rlc.in_safe_mode[xcc_id])
+ return;
+
+ /* if RLC is not enabled, do nothing */
+ if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+ return;
+
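+ /* safe mode is only needed when one of the GFX clockgating features is enabled */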
+ if (adev->cg_flags &
+ (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+ adev->gfx.rlc.funcs->set_safe_mode(adev, xcc_id);
+ adev->gfx.rlc.in_safe_mode[xcc_id] = true;
+ }
+}
+
+/**
+ * amdgpu_gfx_rlc_exit_safe_mode - Set RLC out of safe mode
+ *
+ * @adev: amdgpu_device pointer
+ * @xcc_id: xcc accelerated compute core id
+ *
+ * Take the RLC out of safe mode if it is enabled and currently in safe mode.
+ */
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id)
+{
+ if (!(adev->gfx.rlc.in_safe_mode[xcc_id]))
+ return;
+
+ /* if RLC is not enabled, do nothing */
+ if (!adev->gfx.rlc.funcs->is_rlc_enabled(adev))
+ return;
+
+ if (adev->cg_flags &
+ (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_3D_CGCG)) {
+ adev->gfx.rlc.funcs->unset_safe_mode(adev, xcc_id);
+ adev->gfx.rlc.in_safe_mode[xcc_id] = false;
+ }
+}
+
+/**
+ * amdgpu_gfx_rlc_init_sr - Init save restore block
+ *
+ * @adev: amdgpu_device pointer
+ * @dws: the size of save restore block
+ *
+ * Allocate and set up the RLC save/restore block.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
+{
+ const u32 *src_ptr;
+ u32 *dst_ptr;
+ u32 i;
+ int r;
+
+ /* allocate save restore block */
+ r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.rlc.save_restore_obj,
+ &adev->gfx.rlc.save_restore_gpu_addr,
+ (void **)&adev->gfx.rlc.sr_ptr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
+ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+ /* write the sr buffer */
+ src_ptr = adev->gfx.rlc.reg_list;
+ dst_ptr = adev->gfx.rlc.sr_ptr;
+ for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
+ dst_ptr[i] = cpu_to_le32(src_ptr[i]);
+ amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
+
+ return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_init_csb - Init clear state block
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate and set up the RLC clear state block.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
+{
+ u32 dws;
+ int r;
+
+ /* allocate clear state block */
+ adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
+ r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create rlc csb bo\n", r);
+ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_init_cpt - Init cp table
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate and set up the RLC CP table.
+ * Returns 0 on success or a negative error code if allocation failed.
+ */
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create cp table bo\n", r);
+ amdgpu_gfx_rlc_fini(adev);
+ return r;
+ }
+
+ /* set up the cp table */
+ amdgpu_gfx_rlc_setup_cp_table(adev);
+ amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
+ amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
+
+ return 0;
+}
+
+/**
+ * amdgpu_gfx_rlc_setup_cp_table - set up the CP table buffer
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Write the CP firmware jump table data into the CP table.
+ */
+void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev)
+{
+ const __le32 *fw_data;
+ u32 *dst_ptr;
+ int me, i, max_me;
+ u32 bo_offset = 0;
+ u32 table_offset, table_size;
+
+ max_me = adev->gfx.rlc.funcs->get_cp_table_num(adev);
+
+ /* write the cp table buffer */
+ dst_ptr = adev->gfx.rlc.cp_table_ptr;
+ for (me = 0; me < max_me; me++) {
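+ /* me indices 0-4 select the CE, PFP, ME, MEC and MEC2 jump tables, respectively. */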
+ if (me == 0) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.ce_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 1) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.pfp_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 2) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.me_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 3) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ } else if (me == 4) {
+ const struct gfx_firmware_header_v1_0 *hdr =
+ (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
+ fw_data = (const __le32 *)
+ (adev->gfx.mec2_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ table_offset = le32_to_cpu(hdr->jt_offset);
+ table_size = le32_to_cpu(hdr->jt_size);
+ }
+
+ for (i = 0; i < table_size; i++) {
+ dst_ptr[bo_offset + i] =
+ cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
+ }
+
+ bo_offset += table_size;
+ }
+}
+
+/**
+ * amdgpu_gfx_rlc_fini - Free the BOs used by the RLC
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the three BOs used for the rlc save/restore block, the clear state
+ * block and the jump table block.
+ */
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev)
+{
+ /* save restore block */
+ if (adev->gfx.rlc.save_restore_obj) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj,
+ &adev->gfx.rlc.save_restore_gpu_addr,
+ (void **)&adev->gfx.rlc.sr_ptr);
+ }
+
+ /* clear state block */
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+
+ /* jump table block */
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+}
+
+static int amdgpu_gfx_rlc_init_microcode_v2_0(struct amdgpu_device *adev)
+{
+ const struct common_firmware_header *common_hdr;
+ const struct rlc_firmware_header_v2_0 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+ unsigned int *tmp;
+ unsigned int i;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
+
+ adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
+ adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
+ adev->gfx.rlc.save_and_restore_offset =
+ le32_to_cpu(rlc_hdr->save_and_restore_offset);
+ adev->gfx.rlc.clear_state_descriptor_offset =
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset);
+ adev->gfx.rlc.avail_scratch_ram_locations =
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations);
+ adev->gfx.rlc.reg_restore_list_size =
+ le32_to_cpu(rlc_hdr->reg_restore_list_size);
+ adev->gfx.rlc.reg_list_format_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_start);
+ adev->gfx.rlc.reg_list_format_separate_start =
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_start);
+ adev->gfx.rlc.starting_offsets_start =
+ le32_to_cpu(rlc_hdr->starting_offsets_start);
+ adev->gfx.rlc.reg_list_format_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_format_size_bytes);
+ adev->gfx.rlc.reg_list_size_bytes =
+ le32_to_cpu(rlc_hdr->reg_list_size_bytes);
+ adev->gfx.rlc.register_list_format =
+ kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
+ adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
+ if (!adev->gfx.rlc.register_list_format) {
+ dev_err(adev->dev, "failed to allocate memory for rlc register_list_format\n");
+ return -ENOMEM;
+ }
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_format_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
+
+ adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
+
+ tmp = (unsigned int *)((uintptr_t)rlc_hdr +
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+ for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
+ adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_G;
+ info->fw = adev->gfx.rlc_fw;
+ if (info->fw) {
+ common_hdr = (const struct common_firmware_header *)info->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(common_hdr->ucode_size_bytes), PAGE_SIZE);
+ }
+ }
+
+ return 0;
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_1(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_1 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver);
+ adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver);
+ adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes);
+ adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes);
+ adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver);
+ adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver);
+ adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes);
+ adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes);
+ adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver);
+ adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver);
+ adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes);
+ adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes);
+ adev->gfx.rlc.reg_list_format_direct_reg_list_length =
+ le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.save_restore_list_cntl_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.save_restore_list_gpm_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.save_restore_list_srm_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_2(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_2 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_2 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.rlc_iram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_iram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_iram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_iram_ucode_offset_bytes);
+ adev->gfx.rlc.rlc_dram_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlc_dram_ucode_size_bytes);
+ adev->gfx.rlc.rlc_dram_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlc_dram_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.rlc_iram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_IRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_IRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_iram_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.rlc_dram_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_DRAM];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_DRAM;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlc_dram_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_3(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_3 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_3 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlcp_ucode_version = le32_to_cpu(rlc_hdr->rlcp_ucode_version);
+ adev->gfx.rlcp_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcp_ucode_feature_version);
+ adev->gfx.rlc.rlcp_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcp_ucode_size_bytes);
+ adev->gfx.rlc.rlcp_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcp_ucode_offset_bytes);
+
+ adev->gfx.rlcv_ucode_version = le32_to_cpu(rlc_hdr->rlcv_ucode_version);
+ adev->gfx.rlcv_ucode_feature_version = le32_to_cpu(rlc_hdr->rlcv_ucode_feature_version);
+ adev->gfx.rlc.rlcv_ucode_size_bytes = le32_to_cpu(rlc_hdr->rlcv_ucode_size_bytes);
+ adev->gfx.rlc.rlcv_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->rlcv_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.rlcp_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_P];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_P;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlcp_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.rlcv_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_V];
+ info->ucode_id = AMDGPU_UCODE_ID_RLC_V;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.rlcv_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+static void amdgpu_gfx_rlc_init_microcode_v2_4(struct amdgpu_device *adev)
+{
+ const struct rlc_firmware_header_v2_4 *rlc_hdr;
+ struct amdgpu_firmware_info *info;
+
+ rlc_hdr = (const struct rlc_firmware_header_v2_4 *)adev->gfx.rlc_fw->data;
+ adev->gfx.rlc.global_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->global_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.global_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->global_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se0_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se0_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se0_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se1_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se1_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se1_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se2_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se2_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se2_tap_delays_ucode_offset_bytes);
+ adev->gfx.rlc.se3_tap_delays_ucode_size_bytes = le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_size_bytes);
+ adev->gfx.rlc.se3_tap_delays_ucode = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->se3_tap_delays_ucode_offset_bytes);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+
+ if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
+ }
+}
+
+int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
+ uint16_t version_major,
+ uint16_t version_minor)
+{
+ int err;
+
+ if (version_major < 2) {
+ /* only support rlc_hdr v2.x and onwards */
+ dev_err(adev->dev, "unsupported rlc fw hdr\n");
+ return -EINVAL;
+ }
+
+ /* is_rlc_v2_1 is still used in APU code path */
+ if (version_major == 2 && version_minor == 1)
+ adev->gfx.rlc.is_rlc_v2_1 = true;
+
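+	/* the v2.0 fields are common to every v2.x header; parse them first,
+	 * then layer on the minor-version specific sections below
+	 */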
+ err = amdgpu_gfx_rlc_init_microcode_v2_0(adev);
+ if (err) {
+ dev_err(adev->dev, "fail to init rlc v2_0 microcode\n");
+ return err;
+ }
+
+ if (version_minor >= 1)
+ amdgpu_gfx_rlc_init_microcode_v2_1(adev);
+ if (version_minor >= 2)
+ amdgpu_gfx_rlc_init_microcode_v2_2(adev);
+ if (version_minor == 3)
+ amdgpu_gfx_rlc_init_microcode_v2_3(adev);
+ if (version_minor == 4)
+ amdgpu_gfx_rlc_init_microcode_v2_4(adev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
new file mode 100644
index 000000000000..2ce310b31942
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -0,0 +1,363 @@
+/*
+ * Copyright 2014 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_RLC_H__
+#define __AMDGPU_RLC_H__
+
+#include "clearstate_defs.h"
+
+#define AMDGPU_MAX_RLC_INSTANCES 8
+
+/* firmware ID used in rlc toc */
+typedef enum _FIRMWARE_ID_ {
+ FIRMWARE_ID_INVALID = 0,
+ FIRMWARE_ID_RLC_G_UCODE = 1,
+ FIRMWARE_ID_RLC_TOC = 2,
+ FIRMWARE_ID_RLCG_SCRATCH = 3,
+ FIRMWARE_ID_RLC_SRM_ARAM = 4,
+ FIRMWARE_ID_RLC_SRM_INDEX_ADDR = 5,
+ FIRMWARE_ID_RLC_SRM_INDEX_DATA = 6,
+ FIRMWARE_ID_RLC_P_UCODE = 7,
+ FIRMWARE_ID_RLC_V_UCODE = 8,
+ FIRMWARE_ID_RLX6_UCODE = 9,
+ FIRMWARE_ID_RLX6_DRAM_BOOT = 10,
+ FIRMWARE_ID_GLOBAL_TAP_DELAYS = 11,
+ FIRMWARE_ID_SE0_TAP_DELAYS = 12,
+ FIRMWARE_ID_SE1_TAP_DELAYS = 13,
+ FIRMWARE_ID_GLOBAL_SE0_SE1_SKEW_DELAYS = 14,
+ FIRMWARE_ID_SDMA0_UCODE = 15,
+ FIRMWARE_ID_SDMA0_JT = 16,
+ FIRMWARE_ID_SDMA1_UCODE = 17,
+ FIRMWARE_ID_SDMA1_JT = 18,
+ FIRMWARE_ID_CP_CE = 19,
+ FIRMWARE_ID_CP_PFP = 20,
+ FIRMWARE_ID_CP_ME = 21,
+ FIRMWARE_ID_CP_MEC = 22,
+ FIRMWARE_ID_CP_MES = 23,
+ FIRMWARE_ID_MES_STACK = 24,
+ FIRMWARE_ID_RLC_SRM_DRAM_SR = 25,
+ FIRMWARE_ID_RLCG_SCRATCH_SR = 26,
+ FIRMWARE_ID_RLCP_SCRATCH_SR = 27,
+ FIRMWARE_ID_RLCV_SCRATCH_SR = 28,
+ FIRMWARE_ID_RLX6_DRAM_SR = 29,
+ FIRMWARE_ID_SDMA0_PG_CONTEXT = 30,
+ FIRMWARE_ID_SDMA1_PG_CONTEXT = 31,
+ FIRMWARE_ID_GLOBAL_MUX_SELECT_RAM = 32,
+ FIRMWARE_ID_SE0_MUX_SELECT_RAM = 33,
+ FIRMWARE_ID_SE1_MUX_SELECT_RAM = 34,
+ FIRMWARE_ID_ACCUM_CTRL_RAM = 35,
+ FIRMWARE_ID_RLCP_CAM = 36,
+ FIRMWARE_ID_RLC_SPP_CAM_EXT = 37,
+ FIRMWARE_ID_MAX = 38,
+} FIRMWARE_ID;
+
+typedef enum _SOC21_FIRMWARE_ID_ {
+ SOC21_FIRMWARE_ID_INVALID = 0,
+ SOC21_FIRMWARE_ID_RLC_G_UCODE = 1,
+ SOC21_FIRMWARE_ID_RLC_TOC = 2,
+ SOC21_FIRMWARE_ID_RLCG_SCRATCH = 3,
+ SOC21_FIRMWARE_ID_RLC_SRM_ARAM = 4,
+ SOC21_FIRMWARE_ID_RLC_P_UCODE = 5,
+ SOC21_FIRMWARE_ID_RLC_V_UCODE = 6,
+ SOC21_FIRMWARE_ID_RLX6_UCODE = 7,
+ SOC21_FIRMWARE_ID_RLX6_UCODE_CORE1 = 8,
+ SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT = 9,
+ SOC21_FIRMWARE_ID_RLX6_DRAM_BOOT_CORE1 = 10,
+ SOC21_FIRMWARE_ID_SDMA_UCODE_TH0 = 11,
+ SOC21_FIRMWARE_ID_SDMA_UCODE_TH1 = 12,
+ SOC21_FIRMWARE_ID_CP_PFP = 13,
+ SOC21_FIRMWARE_ID_CP_ME = 14,
+ SOC21_FIRMWARE_ID_CP_MEC = 15,
+ SOC21_FIRMWARE_ID_RS64_MES_P0 = 16,
+ SOC21_FIRMWARE_ID_RS64_MES_P1 = 17,
+ SOC21_FIRMWARE_ID_RS64_PFP = 18,
+ SOC21_FIRMWARE_ID_RS64_ME = 19,
+ SOC21_FIRMWARE_ID_RS64_MEC = 20,
+ SOC21_FIRMWARE_ID_RS64_MES_P0_STACK = 21,
+ SOC21_FIRMWARE_ID_RS64_MES_P1_STACK = 22,
+ SOC21_FIRMWARE_ID_RS64_PFP_P0_STACK = 23,
+ SOC21_FIRMWARE_ID_RS64_PFP_P1_STACK = 24,
+ SOC21_FIRMWARE_ID_RS64_ME_P0_STACK = 25,
+ SOC21_FIRMWARE_ID_RS64_ME_P1_STACK = 26,
+ SOC21_FIRMWARE_ID_RS64_MEC_P0_STACK = 27,
+ SOC21_FIRMWARE_ID_RS64_MEC_P1_STACK = 28,
+ SOC21_FIRMWARE_ID_RS64_MEC_P2_STACK = 29,
+ SOC21_FIRMWARE_ID_RS64_MEC_P3_STACK = 30,
+ SOC21_FIRMWARE_ID_RLC_SRM_DRAM_SR = 31,
+ SOC21_FIRMWARE_ID_RLCG_SCRATCH_SR = 32,
+ SOC21_FIRMWARE_ID_RLCP_SCRATCH_SR = 33,
+ SOC21_FIRMWARE_ID_RLCV_SCRATCH_SR = 34,
+ SOC21_FIRMWARE_ID_RLX6_DRAM_SR = 35,
+ SOC21_FIRMWARE_ID_RLX6_DRAM_SR_CORE1 = 36,
+ SOC21_FIRMWARE_ID_MAX = 37
+} SOC21_FIRMWARE_ID;
+
+typedef enum _SOC24_FIRMWARE_ID_ {
+ SOC24_FIRMWARE_ID_INVALID = 0,
+ SOC24_FIRMWARE_ID_RLC_G_UCODE = 1,
+ SOC24_FIRMWARE_ID_RLC_TOC = 2,
+ SOC24_FIRMWARE_ID_RLCG_SCRATCH = 3,
+ SOC24_FIRMWARE_ID_RLC_SRM_ARAM = 4,
+ SOC24_FIRMWARE_ID_RLC_P_UCODE = 5,
+ SOC24_FIRMWARE_ID_RLC_V_UCODE = 6,
+ SOC24_FIRMWARE_ID_RLX6_UCODE = 7,
+ SOC24_FIRMWARE_ID_RLX6_UCODE_CORE1 = 8,
+ SOC24_FIRMWARE_ID_RLX6_DRAM_BOOT = 9,
+ SOC24_FIRMWARE_ID_RLX6_DRAM_BOOT_CORE1 = 10,
+ SOC24_FIRMWARE_ID_SDMA_UCODE_TH0 = 11,
+ SOC24_FIRMWARE_ID_SDMA_UCODE_TH1 = 12,
+ SOC24_FIRMWARE_ID_CP_PFP = 13,
+ SOC24_FIRMWARE_ID_CP_ME = 14,
+ SOC24_FIRMWARE_ID_CP_MEC = 15,
+ SOC24_FIRMWARE_ID_RS64_MES_P0 = 16,
+ SOC24_FIRMWARE_ID_RS64_MES_P1 = 17,
+ SOC24_FIRMWARE_ID_RS64_PFP = 18,
+ SOC24_FIRMWARE_ID_RS64_ME = 19,
+ SOC24_FIRMWARE_ID_RS64_MEC = 20,
+ SOC24_FIRMWARE_ID_RS64_MES_P0_STACK = 21,
+ SOC24_FIRMWARE_ID_RS64_MES_P1_STACK = 22,
+ SOC24_FIRMWARE_ID_RS64_PFP_P0_STACK = 23,
+ SOC24_FIRMWARE_ID_RS64_PFP_P1_STACK = 24,
+ SOC24_FIRMWARE_ID_RS64_ME_P0_STACK = 25,
+ SOC24_FIRMWARE_ID_RS64_ME_P1_STACK = 26,
+ SOC24_FIRMWARE_ID_RS64_MEC_P0_STACK = 27,
+ SOC24_FIRMWARE_ID_RS64_MEC_P1_STACK = 28,
+ SOC24_FIRMWARE_ID_RS64_MEC_P2_STACK = 29,
+ SOC24_FIRMWARE_ID_RS64_MEC_P3_STACK = 30,
+ SOC24_FIRMWARE_ID_RLC_SRM_DRAM_SR = 31,
+ SOC24_FIRMWARE_ID_RLCG_SCRATCH_SR = 32,
+ SOC24_FIRMWARE_ID_RLCP_SCRATCH_SR = 33,
+ SOC24_FIRMWARE_ID_RLCV_SCRATCH_SR = 34,
+ SOC24_FIRMWARE_ID_RLX6_DRAM_SR = 35,
+ SOC24_FIRMWARE_ID_RLX6_DRAM_SR_CORE1 = 36,
+ SOC24_FIRMWARE_ID_RLCDEBUGLOG = 37,
+ SOC24_FIRMWARE_ID_SRIOV_DEBUG = 38,
+ SOC24_FIRMWARE_ID_SRIOV_CSA_RLC = 39,
+ SOC24_FIRMWARE_ID_SRIOV_CSA_SDMA = 40,
+ SOC24_FIRMWARE_ID_SRIOV_CSA_CP = 41,
+ SOC24_FIRMWARE_ID_UMF_ZONE_PAD = 42,
+ SOC24_FIRMWARE_ID_MAX = 43
+} SOC24_FIRMWARE_ID;
+
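+/* One RLC TOC entry (a descriptive sketch of the layout below): DW0 carries
+ * the firmware id and its offset, DW1 the load/save attributes and image
+ * size, DW2/DW3 the indirect register access information.
+ */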
+typedef struct _RLC_TABLE_OF_CONTENT {
+ union {
+ unsigned int DW0;
+ struct {
+ unsigned int offset : 25;
+ unsigned int id : 7;
+ };
+ };
+
+ union {
+ unsigned int DW1;
+ struct {
+ unsigned int load_at_boot : 1;
+ unsigned int load_at_vddgfx : 1;
+ unsigned int load_at_reset : 1;
+ unsigned int memory_destination : 2;
+ unsigned int vfflr_image_code : 4;
+ unsigned int load_mode_direct : 1;
+ unsigned int save_for_vddgfx : 1;
+ unsigned int save_for_vfflr : 1;
+ unsigned int reserved : 1;
+ unsigned int signed_source : 1;
+ unsigned int size : 18;
+ };
+ };
+
+ union {
+ unsigned int DW2;
+ struct {
+ unsigned int indirect_addr_reg : 16;
+ unsigned int index : 16;
+ };
+ };
+
+ union {
+ unsigned int DW3;
+ struct {
+ unsigned int indirect_data_reg : 16;
+ unsigned int indirect_start_offset : 16;
+ };
+ };
+} RLC_TABLE_OF_CONTENT;
+
+typedef struct _RLC_TABLE_OF_CONTENT_V2 {
+ union {
+ unsigned int DW0;
+ struct {
+ uint32_t offset : 25;
+ uint32_t id : 7;
+ };
+ };
+
+ union {
+ unsigned int DW1;
+ struct {
+ uint32_t reserved0 : 1;
+ uint32_t reserved1 : 1;
+ uint32_t reserved2 : 1;
+ uint32_t memory_destination : 2;
+ uint32_t vfflr_image_code : 4;
+ uint32_t reserved9 : 1;
+ uint32_t reserved10 : 1;
+ uint32_t reserved11 : 1;
+ uint32_t size_x16 : 1;
+ uint32_t reserved13 : 1;
+ uint32_t size : 18;
+ };
+ };
+} RLC_TABLE_OF_CONTENT_V2;
+
+#define RLC_TOC_MAX_SIZE 64
+
+struct amdgpu_rlc_funcs {
+ bool (*is_rlc_enabled)(struct amdgpu_device *adev);
+ void (*set_safe_mode)(struct amdgpu_device *adev, int xcc_id);
+ void (*unset_safe_mode)(struct amdgpu_device *adev, int xcc_id);
+ int (*init)(struct amdgpu_device *adev);
+ u32 (*get_csb_size)(struct amdgpu_device *adev);
+
+ /**
+ * @get_csb_buffer: Get the clear state to be put into the hardware.
+ *
+ * The parameter adev is used to get the CS data and other gfx info,
+ * and buffer is the RLC CS pointer
+ *
+ * Sometimes, the user space puts a request to clear the state in the
+ * command buffer; this function provides the clear state that gets put
+ * into the hardware. Note that the driver programs Clear State
+ * Indirect Buffer (CSB) explicitly when it sets up the kernel rings,
+ * and it also provides a pointer to it which is used by the firmware
+ * to load the clear state in some cases.
+ */
+ void (*get_csb_buffer)(struct amdgpu_device *adev, u32 *buffer);
+ int (*get_cp_table_num)(struct amdgpu_device *adev);
+ int (*resume)(struct amdgpu_device *adev);
+ void (*stop)(struct amdgpu_device *adev);
+ void (*reset)(struct amdgpu_device *adev);
+ void (*start)(struct amdgpu_device *adev);
+ void (*update_spm_vmid)(struct amdgpu_device *adev, struct amdgpu_ring *ring, unsigned vmid);
+ bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
+};
+
+struct amdgpu_rlcg_reg_access_ctrl {
+ uint32_t scratch_reg0;
+ uint32_t scratch_reg1;
+ uint32_t scratch_reg2;
+ uint32_t scratch_reg3;
+ uint32_t grbm_cntl;
+ uint32_t grbm_idx;
+ uint32_t spare_int;
+};
+
+struct amdgpu_rlc {
+ /* for power gating */
+ struct amdgpu_bo *save_restore_obj;
+ uint64_t save_restore_gpu_addr;
+ uint32_t *sr_ptr;
+ const u32 *reg_list;
+ u32 reg_list_size;
+ /* for clear state */
+ struct amdgpu_bo *clear_state_obj;
+ uint64_t clear_state_gpu_addr;
+ uint32_t *cs_ptr;
+ const struct cs_section_def *cs_data;
+ u32 clear_state_size;
+ /* for cp tables */
+ struct amdgpu_bo *cp_table_obj;
+ uint64_t cp_table_gpu_addr;
+ uint32_t *cp_table_ptr;
+ u32 cp_table_size;
+
+ /* safe mode for updating CG/PG state */
+ bool in_safe_mode[AMDGPU_MAX_RLC_INSTANCES];
+ const struct amdgpu_rlc_funcs *funcs;
+
+ /* for firmware data */
+ u32 save_and_restore_offset;
+ u32 clear_state_descriptor_offset;
+ u32 avail_scratch_ram_locations;
+ u32 reg_restore_list_size;
+ u32 reg_list_format_start;
+ u32 reg_list_format_separate_start;
+ u32 starting_offsets_start;
+ u32 reg_list_format_size_bytes;
+ u32 reg_list_size_bytes;
+ u32 reg_list_format_direct_reg_list_length;
+ u32 save_restore_list_cntl_size_bytes;
+ u32 save_restore_list_gpm_size_bytes;
+ u32 save_restore_list_srm_size_bytes;
+ u32 rlc_iram_ucode_size_bytes;
+ u32 rlc_dram_ucode_size_bytes;
+ u32 rlcp_ucode_size_bytes;
+ u32 rlcv_ucode_size_bytes;
+ u32 global_tap_delays_ucode_size_bytes;
+ u32 se0_tap_delays_ucode_size_bytes;
+ u32 se1_tap_delays_ucode_size_bytes;
+ u32 se2_tap_delays_ucode_size_bytes;
+ u32 se3_tap_delays_ucode_size_bytes;
+
+ u32 *register_list_format;
+ u32 *register_restore;
+ u8 *save_restore_list_cntl;
+ u8 *save_restore_list_gpm;
+ u8 *save_restore_list_srm;
+ u8 *rlc_iram_ucode;
+ u8 *rlc_dram_ucode;
+ u8 *rlcp_ucode;
+ u8 *rlcv_ucode;
+ u8 *global_tap_delays_ucode;
+ u8 *se0_tap_delays_ucode;
+ u8 *se1_tap_delays_ucode;
+ u8 *se2_tap_delays_ucode;
+ u8 *se3_tap_delays_ucode;
+
+ bool is_rlc_v2_1;
+
+ /* for rlc autoload */
+ struct amdgpu_bo *rlc_autoload_bo;
+ u64 rlc_autoload_gpu_addr;
+ void *rlc_autoload_ptr;
+
+ /* rlc toc buffer */
+ struct amdgpu_bo *rlc_toc_bo;
+ uint64_t rlc_toc_gpu_addr;
+ void *rlc_toc_buf;
+
+ bool rlcg_reg_access_supported;
+ /* registers for rlcg indirect reg access */
+ struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl[AMDGPU_MAX_RLC_INSTANCES];
+};
+
+void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev, int xcc_id);
+void amdgpu_gfx_rlc_exit_safe_mode(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws);
+int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev);
+void amdgpu_gfx_rlc_fini(struct amdgpu_device *adev);
+int amdgpu_gfx_rlc_init_microcode(struct amdgpu_device *adev,
+ uint16_t version_major,
+ uint16_t version_minor);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 5ca75a456ad2..39070b2a4c04 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -41,370 +41,65 @@
* If we are asked to block we wait on all the oldest fence of all
* rings. We just wait for any of those fence to complete.
*/
-#include <drm/drmP.h>
-#include "amdgpu.h"
-static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
-static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);
+#include "amdgpu.h"
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain)
+ unsigned int size, u32 suballoc_align, u32 domain)
{
- int i, r;
-
- init_waitqueue_head(&sa_manager->wq);
- sa_manager->bo = NULL;
- sa_manager->size = size;
- sa_manager->domain = domain;
- sa_manager->align = align;
- sa_manager->hole = &sa_manager->olist;
- INIT_LIST_HEAD(&sa_manager->olist);
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
- INIT_LIST_HEAD(&sa_manager->flist[i]);
+ int r;
- r = amdgpu_bo_create(adev, size, align, true, domain,
- 0, NULL, NULL, &sa_manager->bo);
+ r = amdgpu_bo_create_kernel(adev, size, AMDGPU_GPU_PAGE_SIZE, domain,
+ &sa_manager->bo, &sa_manager->gpu_addr,
+ &sa_manager->cpu_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
}
+ memset(sa_manager->cpu_ptr, 0, size);
+ drm_suballoc_manager_init(&sa_manager->base, size, suballoc_align);
return r;
}
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager)
{
- struct amdgpu_sa_bo *sa_bo, *tmp;
-
- if (!list_empty(&sa_manager->olist)) {
- sa_manager->hole = &sa_manager->olist,
- amdgpu_sa_bo_try_free(sa_manager);
- if (!list_empty(&sa_manager->olist)) {
- dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
- }
- }
- list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
- amdgpu_sa_bo_remove_locked(sa_bo);
- }
- amdgpu_bo_unref(&sa_manager->bo);
- sa_manager->size = 0;
-}
-
-int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
- struct amdgpu_sa_manager *sa_manager)
-{
- int r;
-
- if (sa_manager->bo == NULL) {
- dev_err(adev->dev, "no bo for sa manager\n");
- return -EINVAL;
- }
-
- /* map the buffer */
- r = amdgpu_bo_reserve(sa_manager->bo, false);
- if (r) {
- dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
- return r;
- }
- r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
- if (r) {
- amdgpu_bo_unreserve(sa_manager->bo);
- dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
- return r;
- }
- r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
- memset(sa_manager->cpu_ptr, 0, sa_manager->size);
- amdgpu_bo_unreserve(sa_manager->bo);
- return r;
-}
-
-int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
- struct amdgpu_sa_manager *sa_manager)
-{
- int r;
-
if (sa_manager->bo == NULL) {
dev_err(adev->dev, "no bo for sa manager\n");
- return -EINVAL;
- }
-
- r = amdgpu_bo_reserve(sa_manager->bo, true);
- if (!r) {
- amdgpu_bo_kunmap(sa_manager->bo);
- amdgpu_bo_unpin(sa_manager->bo);
- amdgpu_bo_unreserve(sa_manager->bo);
- }
- return r;
-}
-
-static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
-{
- struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
- if (sa_manager->hole == &sa_bo->olist) {
- sa_manager->hole = sa_bo->olist.prev;
- }
- list_del_init(&sa_bo->olist);
- list_del_init(&sa_bo->flist);
- dma_fence_put(sa_bo->fence);
- kfree(sa_bo);
-}
-
-static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
-{
- struct amdgpu_sa_bo *sa_bo, *tmp;
-
- if (sa_manager->hole->next == &sa_manager->olist)
return;
-
- sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
- list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
- if (sa_bo->fence == NULL ||
- !dma_fence_is_signaled(sa_bo->fence)) {
- return;
- }
- amdgpu_sa_bo_remove_locked(sa_bo);
- }
-}
-
-static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
-{
- struct list_head *hole = sa_manager->hole;
-
- if (hole != &sa_manager->olist) {
- return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
- }
- return 0;
-}
-
-static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
-{
- struct list_head *hole = sa_manager->hole;
-
- if (hole->next != &sa_manager->olist) {
- return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
- }
- return sa_manager->size;
-}
-
-static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_sa_bo *sa_bo,
- unsigned size, unsigned align)
-{
- unsigned soffset, eoffset, wasted;
-
- soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
- eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
- wasted = (align - (soffset % align)) % align;
-
- if ((eoffset - soffset) >= (size + wasted)) {
- soffset += wasted;
-
- sa_bo->manager = sa_manager;
- sa_bo->soffset = soffset;
- sa_bo->eoffset = soffset + size;
- list_add(&sa_bo->olist, sa_manager->hole);
- INIT_LIST_HEAD(&sa_bo->flist);
- sa_manager->hole = &sa_bo->olist;
- return true;
- }
- return false;
-}
-
-/**
- * amdgpu_sa_event - Check if we can stop waiting
- *
- * @sa_manager: pointer to the sa_manager
- * @size: number of bytes we want to allocate
- * @align: alignment we need to match
- *
- * Check if either there is a fence we can wait for or
- * enough free memory to satisfy the allocation directly
- */
-static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
- unsigned size, unsigned align)
-{
- unsigned soffset, eoffset, wasted;
- int i;
-
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
- if (!list_empty(&sa_manager->flist[i]))
- return true;
-
- soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
- eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
- wasted = (align - (soffset % align)) % align;
-
- if ((eoffset - soffset) >= (size + wasted)) {
- return true;
- }
-
- return false;
-}
-
-static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
- struct dma_fence **fences,
- unsigned *tries)
-{
- struct amdgpu_sa_bo *best_bo = NULL;
- unsigned i, soffset, best, tmp;
-
- /* if hole points to the end of the buffer */
- if (sa_manager->hole->next == &sa_manager->olist) {
- /* try again with its beginning */
- sa_manager->hole = &sa_manager->olist;
- return true;
- }
-
- soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
- /* to handle wrap around we add sa_manager->size */
- best = sa_manager->size * 2;
- /* go over all fence list and try to find the closest sa_bo
- * of the current last
- */
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
- struct amdgpu_sa_bo *sa_bo;
-
- if (list_empty(&sa_manager->flist[i]))
- continue;
-
- sa_bo = list_first_entry(&sa_manager->flist[i],
- struct amdgpu_sa_bo, flist);
-
- if (!dma_fence_is_signaled(sa_bo->fence)) {
- fences[i] = sa_bo->fence;
- continue;
- }
-
- /* limit the number of tries each ring gets */
- if (tries[i] > 2) {
- continue;
- }
-
- tmp = sa_bo->soffset;
- if (tmp < soffset) {
- /* wrap around, pretend it's after */
- tmp += sa_manager->size;
- }
- tmp -= soffset;
- if (tmp < best) {
- /* this sa bo is the closest one */
- best = tmp;
- best_bo = sa_bo;
- }
}
- if (best_bo) {
- uint32_t idx = best_bo->fence->context;
-
- idx %= AMDGPU_SA_NUM_FENCE_LISTS;
- ++tries[idx];
- sa_manager->hole = best_bo->olist.prev;
+ drm_suballoc_manager_fini(&sa_manager->base);
- /* we knew that this one is signaled,
- so it's save to remote it */
- amdgpu_sa_bo_remove_locked(best_bo);
- return true;
- }
- return false;
+ amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
}
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_sa_bo **sa_bo,
- unsigned size, unsigned align)
+ struct drm_suballoc **sa_bo,
+ unsigned int size)
{
- struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
- unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
- unsigned count;
- int i, r;
- signed long t;
-
- if (WARN_ON_ONCE(align > sa_manager->align))
- return -EINVAL;
-
- if (WARN_ON_ONCE(size > sa_manager->size))
- return -EINVAL;
-
- *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
- if (!(*sa_bo))
- return -ENOMEM;
- (*sa_bo)->manager = sa_manager;
- (*sa_bo)->fence = NULL;
- INIT_LIST_HEAD(&(*sa_bo)->olist);
- INIT_LIST_HEAD(&(*sa_bo)->flist);
-
- spin_lock(&sa_manager->wq.lock);
- do {
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
- fences[i] = NULL;
- tries[i] = 0;
- }
-
- do {
- amdgpu_sa_bo_try_free(sa_manager);
-
- if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
- size, align)) {
- spin_unlock(&sa_manager->wq.lock);
- return 0;
- }
+ struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
+ GFP_KERNEL, false, 0);
- /* see if we can skip over some allocations */
- } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
+ if (IS_ERR(sa)) {
+ *sa_bo = NULL;
- for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
- if (fences[i])
- fences[count++] = dma_fence_get(fences[i]);
-
- if (count) {
- spin_unlock(&sa_manager->wq.lock);
- t = dma_fence_wait_any_timeout(fences, count, false,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
- for (i = 0; i < count; ++i)
- dma_fence_put(fences[i]);
-
- r = (t > 0) ? 0 : t;
- spin_lock(&sa_manager->wq.lock);
- } else {
- /* if we have nothing to wait for block */
- r = wait_event_interruptible_locked(
- sa_manager->wq,
- amdgpu_sa_event(sa_manager, size, align)
- );
- }
-
- } while (!r);
+ return PTR_ERR(sa);
+ }
- spin_unlock(&sa_manager->wq.lock);
- kfree(*sa_bo);
- *sa_bo = NULL;
- return r;
+ *sa_bo = sa;
+ return 0;
}
-void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
- struct dma_fence *fence)
+void amdgpu_sa_bo_free(struct drm_suballoc **sa_bo, struct dma_fence *fence)
{
- struct amdgpu_sa_manager *sa_manager;
-
if (sa_bo == NULL || *sa_bo == NULL) {
return;
}
- sa_manager = (*sa_bo)->manager;
- spin_lock(&sa_manager->wq.lock);
- if (fence && !dma_fence_is_signaled(fence)) {
- uint32_t idx;
-
- (*sa_bo)->fence = dma_fence_get(fence);
- idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
- list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
- } else {
- amdgpu_sa_bo_remove_locked(*sa_bo);
- }
- wake_up_all_locked(&sa_manager->wq);
- spin_unlock(&sa_manager->wq.lock);
+ drm_suballoc_free(*sa_bo, fence);
*sa_bo = NULL;
}
@@ -413,26 +108,8 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m)
{
- struct amdgpu_sa_bo *i;
+ struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&sa_manager->wq.lock);
- list_for_each_entry(i, &sa_manager->olist, olist) {
- uint64_t soffset = i->soffset + sa_manager->gpu_addr;
- uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
- if (&i->olist == sa_manager->hole) {
- seq_printf(m, ">");
- } else {
- seq_printf(m, " ");
- }
- seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
- soffset, eoffset, eoffset - soffset);
-
- if (i->fence)
- seq_printf(m, " protected by 0x%08x on context %llu",
- i->fence->seqno, i->fence->context);
-
- seq_printf(m, "\n");
- }
- spin_unlock(&sa_manager->wq.lock);
+ drm_suballoc_dump_debug_info(&sa_manager->base, &p, sa_manager->gpu_addr);
}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
new file mode 100644
index 000000000000..341beec59537
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2017 Valve Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Andres Rodriguez <andresx7@gmail.com>
+ */
+
+#include <linux/file.h>
+#include <linux/pid.h>
+
+#include <drm/amdgpu_drm.h>
+
+#include "amdgpu.h"
+#include "amdgpu_sched.h"
+#include "amdgpu_vm.h"
+
+static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
+ int fd,
+ int32_t priority)
+{
+ CLASS(fd, f)(fd);
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_ctx_mgr *mgr;
+ struct amdgpu_ctx *ctx;
+ uint32_t id;
+ int r;
+
+ if (fd_empty(f))
+ return -EINVAL;
+
+ r = amdgpu_file_to_fpriv(fd_file(f), &fpriv);
+ if (r)
+ return r;
+
+ mgr = &fpriv->ctx_mgr;
+ mutex_lock(&mgr->lock);
+ idr_for_each_entry(&mgr->ctx_handles, ctx, id)
+ amdgpu_ctx_priority_override(ctx, priority);
+ mutex_unlock(&mgr->lock);
+
+ return 0;
+}
+
+static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
+ int fd,
+ unsigned ctx_id,
+ int32_t priority)
+{
+ CLASS(fd, f)(fd);
+ struct amdgpu_fpriv *fpriv;
+ struct amdgpu_ctx *ctx;
+ int r;
+
+ if (fd_empty(f))
+ return -EINVAL;
+
+ r = amdgpu_file_to_fpriv(fd_file(f), &fpriv);
+ if (r)
+ return r;
+
+ ctx = amdgpu_ctx_get(fpriv, ctx_id);
+
+ if (!ctx)
+ return -EINVAL;
+
+ amdgpu_ctx_priority_override(ctx, priority);
+ amdgpu_ctx_put(ctx);
+ return 0;
+}
+
+int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ union drm_amdgpu_sched *args = data;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ int r;
+
+ /* First check the op, then the op's argument.
+ */
+ switch (args->in.op) {
+ case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
+ case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE:
+ break;
+ default:
+ DRM_ERROR("Invalid sched op specified: %d\n", args->in.op);
+ return -EINVAL;
+ }
+
+ if (!amdgpu_ctx_priority_is_valid(args->in.priority)) {
+ WARN(1, "Invalid context priority %d\n", args->in.priority);
+ return -EINVAL;
+ }
+
+ switch (args->in.op) {
+ case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
+ r = amdgpu_sched_process_priority_override(adev,
+ args->in.fd,
+ args->in.priority);
+ break;
+ case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE:
+ r = amdgpu_sched_context_priority_override(adev,
+ args->in.fd,
+ args->in.ctx_id,
+ args->in.priority);
+ break;
+ default:
+ /* Impossible.
+ */
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
index 9ef96aab3f24..67e5b2472f6a 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2017 Valve Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,16 +19,20 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Andres Rodriguez <andresx7@gmail.com>
*/
-#ifndef _EVENTINIT_H_
-#define _EVENTINIT_H_
+#ifndef __AMDGPU_SCHED_H__
+#define __AMDGPU_SCHED_H__
-#define PEM_CURRENT_POWERPLAY_FEATURE_VERSION 4
+enum drm_sched_priority;
-void pem_init_feature_info(struct pp_eventmgr *eventmgr);
-void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr);
-int pem_register_interrupts(struct pp_eventmgr *eventmgr);
-int pem_unregister_interrupts(struct pp_eventmgr *eventmgr);
+struct drm_device;
+struct drm_file;
-#endif /* _EVENTINIT_H_ */
+int amdgpu_to_sched_priority(int amdgpu_priority,
+ enum drm_sched_priority *prio);
+int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+#endif // __AMDGPU_SCHED_H__
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
new file mode 100644
index 000000000000..8b8a04138711
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -0,0 +1,611 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_sdma.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
+#include "gc/gc_10_1_0_offset.h"
+#include "gc/gc_10_3_0_sh_mask.h"
+
+#define AMDGPU_CSA_SDMA_SIZE 64
+/* SDMA CSA resides in the 3rd page of the CSA */
+#define AMDGPU_CSA_SDMA_OFFSET (4096 * 2)
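+/* e.g. SDMA instance 2's 64-byte slot sits at csa_vaddr + 8192 + 2 * 64,
+ * see amdgpu_sdma_get_csa_mc_addr() below
+ */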
+
+/*
+ * GPU SDMA IP block helper functions.
+ */
+
+struct amdgpu_sdma_instance *amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ if (ring == &adev->sdma.instance[i].ring ||
+ ring == &adev->sdma.instance[i].page)
+ return &adev->sdma.instance[i];
+
+ return NULL;
+}
+
+int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (ring == &adev->sdma.instance[i].ring ||
+ ring == &adev->sdma.instance[i].page) {
+ *index = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
+ unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint64_t csa_mc_addr;
+ uint32_t index = 0;
+ int r;
+
+ /* don't enable OS preemption on SDMA under SRIOV */
+ if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
+ return 0;
+
+ r = amdgpu_sdma_get_index_from_ring(ring, &index);
+
+ if (r || index > 31)
+ csa_mc_addr = 0;
+ else
+ csa_mc_addr = amdgpu_csa_vaddr(adev) +
+ AMDGPU_CSA_SDMA_OFFSET +
+ index * AMDGPU_CSA_SDMA_SIZE;
+
+ return csa_mc_addr;
+}
+
+int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block)
+{
+ int r, i;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq,
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ if (r)
+ goto late_fini;
+ }
+ }
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+ return r;
+}
+
+int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry)
+{
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+
+ if (amdgpu_sriov_vf(adev))
+ return AMDGPU_RAS_SUCCESS;
+
+ amdgpu_ras_reset_gpu(adev);
+
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
+
+static int amdgpu_sdma_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
+{
+ uint16_t version_major;
+ const struct common_firmware_header *header = NULL;
+ const struct sdma_firmware_header_v1_0 *hdr;
+ const struct sdma_firmware_header_v2_0 *hdr_v2;
+ const struct sdma_firmware_header_v3_0 *hdr_v3;
+
+ header = (const struct common_firmware_header *)
+ sdma_inst->fw->data;
+ version_major = le16_to_cpu(header->header_version_major);
+
+ switch (version_major) {
+ case 1:
+ hdr = (const struct sdma_firmware_header_v1_0 *)sdma_inst->fw->data;
+ sdma_inst->fw_version = le32_to_cpu(hdr->header.ucode_version);
+ sdma_inst->feature_version = le32_to_cpu(hdr->ucode_feature_version);
+ break;
+ case 2:
+ hdr_v2 = (const struct sdma_firmware_header_v2_0 *)sdma_inst->fw->data;
+ sdma_inst->fw_version = le32_to_cpu(hdr_v2->header.ucode_version);
+ sdma_inst->feature_version = le32_to_cpu(hdr_v2->ucode_feature_version);
+ break;
+ case 3:
+ hdr_v3 = (const struct sdma_firmware_header_v3_0 *)sdma_inst->fw->data;
+ sdma_inst->fw_version = le32_to_cpu(hdr_v3->header.ucode_version);
+ sdma_inst->feature_version = le32_to_cpu(hdr_v3->ucode_feature_version);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (sdma_inst->feature_version >= 20)
+ sdma_inst->burst_nop = true;
+
+ return 0;
+}
+
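+/* Release the SDMA firmware references and clear the instance array. When
+ * @duplicate is true every instance shares instance 0's firmware, so only
+ * the first reference is dropped.
+ */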
+void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
+ bool duplicate)
+{
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
+ if (duplicate)
+ break;
+ }
+
+ memset((void *)adev->sdma.instance, 0,
+ sizeof(struct amdgpu_sdma_instance) * AMDGPU_MAX_SDMA_INSTANCES);
+}
+
+int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
+ u32 instance, bool duplicate)
+{
+ struct amdgpu_firmware_info *info = NULL;
+ const struct common_firmware_header *header = NULL;
+ int err, i;
+ const struct sdma_firmware_header_v2_0 *sdma_hdr;
+ const struct sdma_firmware_header_v3_0 *sdma_hv3;
+ uint16_t version_major;
+ char ucode_prefix[30];
+
+ amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
+ if (instance == 0)
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
+ else
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s%d.bin", ucode_prefix, instance);
+ if (err)
+ goto out;
+
+ header = (const struct common_firmware_header *)
+ adev->sdma.instance[instance].fw->data;
+ version_major = le16_to_cpu(header->header_version_major);
+
+ if ((duplicate && instance) || (!duplicate && version_major > 1)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = amdgpu_sdma_init_inst_ctx(&adev->sdma.instance[instance]);
+ if (err)
+ goto out;
+
+ if (duplicate) {
+ for (i = 1; i < adev->sdma.num_instances; i++)
+ memcpy((void *)&adev->sdma.instance[i],
+ (void *)&adev->sdma.instance[0],
+ sizeof(struct amdgpu_sdma_instance));
+ }
+
+ DRM_DEBUG("psp_load == '%s'\n",
+ adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ switch (version_major) {
+ case 1:
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (!duplicate && (instance != i))
+ continue;
+ else {
+ /* Use a single copy per SDMA firmware type. PSP uses the same instance for all
+ * groups of SDMAs */
+ if ((amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+ IP_VERSION(4, 4, 2) ||
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+ IP_VERSION(4, 4, 4) ||
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+ IP_VERSION(4, 4, 5)) &&
+ adev->firmware.load_type ==
+ AMDGPU_FW_LOAD_PSP &&
+ adev->sdma.num_inst_per_aid == i) {
+ break;
+ }
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
+ info->fw = adev->sdma.instance[i].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
+ }
+ break;
+ case 2:
+ sdma_hdr = (const struct sdma_firmware_header_v2_0 *)
+ adev->sdma.instance[0].fw->data;
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH0];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH0;
+ info->fw = adev->sdma.instance[0].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_UCODE_TH1];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA_UCODE_TH1;
+ info->fw = adev->sdma.instance[0].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
+ break;
+ case 3:
+ sdma_hv3 = (const struct sdma_firmware_header_v3_0 *)
+ adev->sdma.instance[0].fw->data;
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA_RS64];
+ info->ucode_id = AMDGPU_UCODE_ID_SDMA_RS64;
+ info->fw = adev->sdma.instance[0].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(sdma_hv3->ucode_size_bytes), PAGE_SIZE);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ }
+
+out:
+ if (err)
+ amdgpu_sdma_destroy_inst_ctx(adev, duplicate);
+ return err;
+}
+
+int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err = 0;
+ struct amdgpu_sdma_ras *ras = NULL;
+
+ /* If adev->sdma.ras is NULL, SDMA does not support the RAS
+ * function, so there is nothing to do here.
+ */
+ if (!adev->sdma.ras)
+ return 0;
+
+ ras = adev->sdma.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register sdma ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "sdma");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->sdma.ras_if = &ras->ras_block.ras_comm;
+
+ /* If no IP-specific ras_late_init is defined, use the default ras_late_init */
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init;
+
+ /* If no IP-specific ras_cb is defined, use the default ras_cb */
+ if (!ras->ras_block.ras_cb)
+ ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb;
+
+ return 0;
+}
+
+/*
+ * debugfs interface to enable/disable sdma job submission to a specific core.
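+ *
+ * Usage sketch (the debugfs path depends on the DRM minor number):
+ *   echo 0x3 > <debugfs_dir>/dri/xxx/amdgpu_sdma_sched_mask
+ * With page queues present, bit (2 * i) selects instance i's gfx ring and
+ * bit (2 * i + 1) its page ring, so 0x3 leaves only instance 0 schedulable.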
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_sdma_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u64 i, num_ring;
+ u64 mask = 0;
+ struct amdgpu_ring *ring, *page = NULL;
+
+ if (!adev)
+ return -ENODEV;
+
+ /* Determine the number of rings per SDMA instance
+ * (1 for sdma gfx ring, 2 if page queue exists)
+ */
+ if (adev->sdma.has_page_queue)
+ num_ring = 2;
+ else
+ num_ring = 1;
+
+ /* Calculate the maximum possible mask value
+ * based on the number of SDMA instances and rings
+ */
+ mask = BIT_ULL(adev->sdma.num_instances * num_ring) - 1;
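+ /* e.g. 4 instances with page queues -> 8 rings -> mask = 0xff */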
+
+ if ((val & mask) == 0)
+ return -EINVAL;
+
+ for (i = 0; i < adev->sdma.num_instances; ++i) {
+ ring = &adev->sdma.instance[i].ring;
+ if (adev->sdma.has_page_queue)
+ page = &adev->sdma.instance[i].page;
+ if (val & BIT_ULL(i * num_ring))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+
+ if (page) {
+ if (val & BIT_ULL(i * num_ring + 1))
+ page->sched.ready = true;
+ else
+ page->sched.ready = false;
+ }
+ }
+ /* make the sched.ready flag updates visible across SMP immediately */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_sdma_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u64 i, num_ring;
+ u64 mask = 0;
+ struct amdgpu_ring *ring, *page = NULL;
+
+ if (!adev)
+ return -ENODEV;
+
+ /* Determine the number of rings per SDMA instance
+ * (1 for sdma gfx ring, 2 if page queue exists)
+ */
+ if (adev->sdma.has_page_queue)
+ num_ring = 2;
+ else
+ num_ring = 1;
+
+ for (i = 0; i < adev->sdma.num_instances; ++i) {
+ ring = &adev->sdma.instance[i].ring;
+ if (adev->sdma.has_page_queue)
+ page = &adev->sdma.instance[i].page;
+
+ if (ring->sched.ready)
+ mask |= BIT_ULL(i * num_ring);
+ else
+ mask &= ~BIT_ULL(i * num_ring);
+
+ if (page) {
+ if (page->sched.ready)
+ mask |= BIT_ULL(i * num_ring + 1);
+ else
+ mask &= ~BIT_ULL(i * num_ring + 1);
+ }
+ }
+
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_sdma_sched_mask_fops,
+ amdgpu_debugfs_sdma_sched_mask_get,
+ amdgpu_debugfs_sdma_sched_mask_set, "%llx\n");
+
+#endif
+
+void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ if (!(adev->sdma.num_instances > 1))
+ return;
+ sprintf(name, "amdgpu_sdma_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_sdma_sched_mask_fops);
+#endif
+}
+
+static ssize_t amdgpu_get_sdma_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->sdma.supported_reset);
+}
+
+static DEVICE_ATTR(sdma_reset_mask, 0444,
+ amdgpu_get_sdma_reset_mask, NULL);
+
+int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (!amdgpu_gpu_recovery)
+ return r;
+
+ if (adev->sdma.num_instances) {
+ r = device_create_file(adev->dev, &dev_attr_sdma_reset_mask);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_gpu_recovery)
+ return;
+
+ if (adev->dev->kobj.sd) {
+ if (adev->sdma.num_instances)
+ device_remove_file(adev->dev, &dev_attr_sdma_reset_mask);
+ }
+}
+
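+/* Return the page-queue ring paired with an SDMA gfx ring, or NULL when the
+ * instance has no page queue or @ring is not an SDMA gfx ring.
+ */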
+struct amdgpu_ring *amdgpu_sdma_get_shared_ring(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (adev->sdma.has_page_queue &&
+ (ring->me < adev->sdma.num_instances) &&
+ (ring == &adev->sdma.instance[ring->me].ring))
+ return &adev->sdma.instance[ring->me].page;
+ else
+ return NULL;
+}
+
+/**
+ * amdgpu_sdma_is_shared_inv_eng - Check if a ring is an SDMA ring that shares a VM invalidation engine
+ * @adev: Pointer to the AMDGPU device structure
+ * @ring: Pointer to the ring structure to check
+ *
+ * This function checks if the given ring is an SDMA ring that shares a VM invalidation engine.
+ * It returns true if the ring is such an SDMA ring, false otherwise.
+ */
+bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ int i = ring->me;
+
+ if (!adev->sdma.has_page_queue || i >= adev->sdma.num_instances)
+ return false;
+
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0))
+ return (ring == &adev->sdma.instance[i].page);
+ else
+ return false;
+}
+
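+/* Invoke the per-IP kernel-queue soft reset for one SDMA instance; returns
+ * -EOPNOTSUPP when the IP version provides no such callback.
+ */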
+static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id)
+{
+ struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
+
+ if (sdma_instance->funcs->soft_reset_kernel_queue)
+ return sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id);
+
+ return -EOPNOTSUPP;
+}
+
+/**
+ * amdgpu_sdma_reset_engine - Reset a specific SDMA engine
+ * @adev: Pointer to the AMDGPU device
+ * @instance_id: Logical ID of the SDMA engine instance to reset
+ * @caller_handles_kernel_queues: Skip kernel queue processing. Caller
+ * will handle it.
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id,
+ bool caller_handles_kernel_queues)
+{
+ int ret = 0;
+ struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
+ struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
+ struct amdgpu_ring *page_ring = &sdma_instance->page;
+
+ mutex_lock(&sdma_instance->engine_reset_mutex);
+
+ if (!caller_handles_kernel_queues) {
+ /* Stop the scheduler's work queue for the GFX and page rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
+ drm_sched_wqueue_stop(&gfx_ring->sched);
+
+ if (adev->sdma.has_page_queue)
+ drm_sched_wqueue_stop(&page_ring->sched);
+ }
+
+ if (sdma_instance->funcs->stop_kernel_queue) {
+ sdma_instance->funcs->stop_kernel_queue(gfx_ring);
+ if (adev->sdma.has_page_queue)
+ sdma_instance->funcs->stop_kernel_queue(page_ring);
+ }
+
+ /* Perform the SDMA reset for the specified instance */
+ ret = amdgpu_sdma_soft_reset(adev, instance_id);
+ if (ret) {
+ dev_err(adev->dev, "Failed to reset SDMA logical instance %u\n", instance_id);
+ goto exit;
+ }
+
+ if (sdma_instance->funcs->start_kernel_queue) {
+ sdma_instance->funcs->start_kernel_queue(gfx_ring);
+ if (adev->sdma.has_page_queue)
+ sdma_instance->funcs->start_kernel_queue(page_ring);
+ }
+
+exit:
+ if (!caller_handles_kernel_queues) {
+ /* Restart the scheduler's work queue for the GFX and page rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ if (!ret) {
+ amdgpu_fence_driver_force_completion(gfx_ring);
+ drm_sched_wqueue_start(&gfx_ring->sched);
+ if (adev->sdma.has_page_queue) {
+ amdgpu_fence_driver_force_completion(page_ring);
+ drm_sched_wqueue_start(&page_ring->sched);
+ }
+ }
+ }
+ mutex_unlock(&sdma_instance->engine_reset_mutex);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
new file mode 100644
index 000000000000..34311f32be4c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_SDMA_H__
+#define __AMDGPU_SDMA_H__
+#include "amdgpu_ras.h"
+
+/* max number of IP instances */
+#define AMDGPU_MAX_SDMA_INSTANCES 16
+
+enum amdgpu_sdma_irq {
+ AMDGPU_SDMA_IRQ_INSTANCE0 = 0,
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_SDMA_IRQ_INSTANCE2,
+ AMDGPU_SDMA_IRQ_INSTANCE3,
+ AMDGPU_SDMA_IRQ_INSTANCE4,
+ AMDGPU_SDMA_IRQ_INSTANCE5,
+ AMDGPU_SDMA_IRQ_INSTANCE6,
+ AMDGPU_SDMA_IRQ_INSTANCE7,
+ AMDGPU_SDMA_IRQ_INSTANCE8,
+ AMDGPU_SDMA_IRQ_INSTANCE9,
+ AMDGPU_SDMA_IRQ_INSTANCE10,
+ AMDGPU_SDMA_IRQ_INSTANCE11,
+ AMDGPU_SDMA_IRQ_INSTANCE12,
+ AMDGPU_SDMA_IRQ_INSTANCE13,
+ AMDGPU_SDMA_IRQ_INSTANCE14,
+ AMDGPU_SDMA_IRQ_INSTANCE15,
+ AMDGPU_SDMA_IRQ_LAST
+};
+
+#define NUM_SDMA(x) hweight32(x)
+
+struct amdgpu_sdma_funcs {
+ int (*stop_kernel_queue)(struct amdgpu_ring *ring);
+ int (*start_kernel_queue)(struct amdgpu_ring *ring);
+ int (*soft_reset_kernel_queue)(struct amdgpu_device *adev, u32 instance_id);
+};
+
+struct amdgpu_sdma_instance {
+ /* SDMA firmware */
+ const struct firmware *fw;
+ uint32_t fw_version;
+ uint32_t feature_version;
+
+ struct amdgpu_ring ring;
+ struct amdgpu_ring page;
+ bool burst_nop;
+ uint32_t aid_id;
+
+ struct amdgpu_bo *sdma_fw_obj;
+ uint64_t sdma_fw_gpu_addr;
+ uint32_t *sdma_fw_ptr;
+ struct mutex engine_reset_mutex;
+ /* track guilty state of GFX and PAGE queues */
+ bool gfx_guilty;
+ bool page_guilty;
+ const struct amdgpu_sdma_funcs *funcs;
+};
+
+enum amdgpu_sdma_ras_memory_id {
+ AMDGPU_SDMA_MBANK_DATA_BUF0 = 1,
+ AMDGPU_SDMA_MBANK_DATA_BUF1 = 2,
+ AMDGPU_SDMA_MBANK_DATA_BUF2 = 3,
+ AMDGPU_SDMA_MBANK_DATA_BUF3 = 4,
+ AMDGPU_SDMA_MBANK_DATA_BUF4 = 5,
+ AMDGPU_SDMA_MBANK_DATA_BUF5 = 6,
+ AMDGPU_SDMA_MBANK_DATA_BUF6 = 7,
+ AMDGPU_SDMA_MBANK_DATA_BUF7 = 8,
+ AMDGPU_SDMA_MBANK_DATA_BUF8 = 9,
+ AMDGPU_SDMA_MBANK_DATA_BUF9 = 10,
+ AMDGPU_SDMA_MBANK_DATA_BUF10 = 11,
+ AMDGPU_SDMA_MBANK_DATA_BUF11 = 12,
+ AMDGPU_SDMA_MBANK_DATA_BUF12 = 13,
+ AMDGPU_SDMA_MBANK_DATA_BUF13 = 14,
+ AMDGPU_SDMA_MBANK_DATA_BUF14 = 15,
+ AMDGPU_SDMA_MBANK_DATA_BUF15 = 16,
+ AMDGPU_SDMA_UCODE_BUF = 17,
+ AMDGPU_SDMA_RB_CMD_BUF = 18,
+ AMDGPU_SDMA_IB_CMD_BUF = 19,
+ AMDGPU_SDMA_UTCL1_RD_FIFO = 20,
+ AMDGPU_SDMA_UTCL1_RDBST_FIFO = 21,
+ AMDGPU_SDMA_UTCL1_WR_FIFO = 22,
+ AMDGPU_SDMA_DATA_LUT_FIFO = 23,
+ AMDGPU_SDMA_SPLIT_DAT_BUF = 24,
+ AMDGPU_SDMA_MEMORY_BLOCK_LAST,
+};
+
+struct amdgpu_sdma_ras {
+ struct amdgpu_ras_block_object ras_block;
+};
+
+struct amdgpu_sdma {
+ struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
+ struct amdgpu_irq_src trap_irq;
+ struct amdgpu_irq_src illegal_inst_irq;
+ struct amdgpu_irq_src fence_irq;
+ struct amdgpu_irq_src ecc_irq;
+ struct amdgpu_irq_src vm_hole_irq;
+ struct amdgpu_irq_src doorbell_invalid_irq;
+ struct amdgpu_irq_src pool_timeout_irq;
+ struct amdgpu_irq_src srbm_write_irq;
+ struct amdgpu_irq_src ctxt_empty_irq;
+
+ int num_instances;
+ uint32_t sdma_mask;
+ int num_inst_per_aid;
+ uint32_t srbm_soft_reset;
+ bool has_page_queue;
+ struct ras_common_if *ras_if;
+ struct amdgpu_sdma_ras *ras;
+ uint32_t *ip_dump;
+ uint32_t supported_reset;
+ struct list_head reset_callback_list;
+ bool no_user_submission;
+ bool disable_uq;
+};
+
+/*
+ * Provided by hw blocks that can move/clear data, e.g. gfx or sdma.
+ * Currently, however, only sdma is used to move data.
+ */
+struct amdgpu_buffer_funcs {
+ /* maximum bytes in a single operation */
+ uint32_t copy_max_bytes;
+
+ /* number of dw to reserve per operation */
+ unsigned copy_num_dw;
+
+ /* used for buffer migration */
+ void (*emit_copy_buffer)(struct amdgpu_ib *ib,
+ /* src addr in bytes */
+ uint64_t src_offset,
+ /* dst addr in bytes */
+ uint64_t dst_offset,
+ /* number of byte to transfer */
+ uint32_t byte_count,
+ uint32_t copy_flags);
+
+ /* maximum bytes in a single operation */
+ uint32_t fill_max_bytes;
+
+ /* number of dw to reserve per operation */
+ unsigned fill_num_dw;
+
+ /* used for buffer clearing */
+ void (*emit_fill_buffer)(struct amdgpu_ib *ib,
+ /* value to write to memory */
+ uint32_t src_data,
+ /* dst addr in bytes */
+ uint64_t dst_offset,
+ /* number of byte to fill */
+ uint32_t byte_count);
+};
+
+int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id,
+ bool caller_handles_kernel_queues);
+
+#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
+#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
+
+struct amdgpu_sdma_instance *
+amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
+int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
+uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid);
+int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block);
+int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
+ void *err_data,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance,
+ bool duplicate);
+void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
+ bool duplicate);
+int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_sdma_sched_mask_init(struct amdgpu_device *adev);
+int amdgpu_sdma_sysfs_reset_mask_init(struct amdgpu_device *adev);
+void amdgpu_sdma_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+bool amdgpu_sdma_is_shared_inv_eng(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+struct amdgpu_ring *amdgpu_sdma_get_shared_ring(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
new file mode 100644
index 000000000000..41ebe690eeff
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_securedisplay.h"
+
+/**
+ * DOC: AMDGPU SECUREDISPLAY debugfs test interface
+ *
+ * How to use:
+ * echo <opcode> <value> > <debugfs_dir>/dri/xxx/securedisplay_test
+ * eg. echo 1 > <debugfs_dir>/dri/xxx/securedisplay_test
+ * eg. echo 2 phy_id > <debugfs_dir>/dri/xxx/securedisplay_test
+ *
+ * opcode:
+ * 1: Query whether the TA is responding; used only for validation purposes.
+ * 2: Send region of interest and CRC value to I2C. (uint32)phy_id is
+ * sent to determine which DIO scratch register should be used to get
+ * the ROI, with i2c_buf received as the output.
+ *
+ * Refer to the header file ta_securedisplay_if.h for more detail.
+ *
+ */
+
+void psp_securedisplay_parse_resp_status(struct psp_context *psp,
+ enum ta_securedisplay_status status)
+{
+ switch (status) {
+ case TA_SECUREDISPLAY_STATUS__SUCCESS:
+ break;
+ case TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE:
+ dev_err(psp->adev->dev, "Secure display: Generic Failure.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER:
+ dev_err(psp->adev->dev, "Secure display: Invalid Parameter.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__NULL_POINTER:
+ dev_err(psp->adev->dev, "Secure display: Null Pointer.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR:
+ dev_err(psp->adev->dev, "Secure display: Failed to write to I2C.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR:
+ dev_err(psp->adev->dev, "Secure display: Failed to Read DIO Scratch Register.");
+ break;
+ case TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR:
+ dev_err(psp->adev->dev, "Secure display: Failed to Read CRC");
+ break;
+ case TA_SECUREDISPLAY_STATUS__I2C_INIT_ERROR:
+ dev_err(psp->adev->dev, "Secure display: Failed to initialize I2C.");
+ break;
+ default:
+ dev_err(psp->adev->dev, "Secure display: Failed to parse status: %d\n", status);
+ }
+}
+
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct ta_securedisplay_cmd **cmd,
+ enum ta_securedisplay_command command_id)
+{
+ *cmd = (struct ta_securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf;
+ memset(*cmd, 0, sizeof(struct ta_securedisplay_cmd));
+ (*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE;
+ (*cmd)->cmd_id = command_id;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct psp_context *psp = &adev->psp;
+ struct ta_securedisplay_cmd *securedisplay_cmd;
+ struct drm_device *dev = adev_to_drm(adev);
+ uint32_t phy_id;
+ uint32_t op;
+ char str[64];
+ int ret;
+
+ if (*pos || size > sizeof(str) - 1)
+ return -EINVAL;
+
+ memset(str, 0, sizeof(str));
+ ret = copy_from_user(str, buf, size);
+ if (ret)
+ return -EFAULT;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+ }
+
+ if (size < 3)
+ sscanf(str, "%u ", &op);
+ else
+ sscanf(str, "%u %u", &op, &phy_id);
+
+ switch (op) {
+ case 1:
+ mutex_lock(&psp->securedisplay_context.mutex);
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+ if (!ret) {
+ if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS)
+ dev_info(adev->dev, "SECUREDISPLAY: query securedisplay TA ret is 0x%X\n",
+ securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
+ else
+ psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+ }
+ mutex_unlock(&psp->securedisplay_context.mutex);
+ break;
+ case 2:
+ if (size < 3 || phy_id >= TA_SECUREDISPLAY_MAX_PHY) {
+ dev_err(adev->dev, "Invalid input: %s\n", str);
+ /* drop the runtime PM reference taken above before bailing out */
+ pm_runtime_put_autosuspend(dev->dev);
+ return -EINVAL;
+ }
+ mutex_lock(&psp->securedisplay_context.mutex);
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_id;
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+ if (!ret) {
+ if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS) {
+ dev_info(adev->dev, "SECUREDISPLAY: I2C buffer out put is: %*ph\n",
+ TA_SECUREDISPLAY_I2C_BUFFER_SIZE,
+ securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf);
+ } else {
+ psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+ }
+ }
+ mutex_unlock(&psp->securedisplay_context.mutex);
+ break;
+ default:
+ dev_err(adev->dev, "Invalid input: %s\n", str);
+ }
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return size;
+}
+
+static const struct file_operations amdgpu_securedisplay_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_securedisplay_debugfs_write,
+ .llseek = default_llseek
+};
+
+#endif
+
+void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+
+ if (!adev->psp.securedisplay_context.context.initialized)
+ return;
+
+ debugfs_create_file("securedisplay_test", S_IWUSR, adev_to_drm(adev)->primary->debugfs_root,
+ adev, &amdgpu_securedisplay_debugfs_ops);
+#endif
+}
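
A minimal userspace sketch of exercising the debugfs interface added above; the debugfs mount point and DRI minor number ("0") are assumptions about the target system, not part of this patch, and the results are reported through dmesg by the dev_info()/dev_err() calls in the handler.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Assumed path; substitute the DRI minor of the GPU under test. */
            const char *path = "/sys/kernel/debug/dri/0/securedisplay_test";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* Opcode 1: query whether the secure display TA responds. */
            if (write(fd, "1", 1) < 0)
                    perror("query TA");

            /* Opcode 2, phy_id 0: request ROI/CRC over I2C for PHY 0. */
            if (write(fd, "2 0", 3) < 0)
                    perror("send ROI/CRC");

            close(fd);
            return 0;
    }
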
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
new file mode 100644
index 000000000000..456ad68ed4b2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _AMDGPU_SECUREDISPLAY_H
+#define _AMDGPU_SECUREDISPLAY_H
+
+#include "amdgpu.h"
+#include "ta_secureDisplay_if.h"
+
+void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev);
+void psp_securedisplay_parse_resp_status(struct psp_context *psp,
+ enum ta_securedisplay_status status);
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct ta_securedisplay_cmd **cmd,
+ enum ta_securedisplay_command command_id);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
new file mode 100644
index 000000000000..a0b479d5fff1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_seq64.h"
+
+#include <drm/drm_exec.h>
+
+/**
+ * DOC: amdgpu_seq64
+ *
+ * amdgpu_seq64 hands out one 64-bit memory slot per request, in sequence
+ * order. The seq64 pool is required for user queue fence memory allocation,
+ * TLB counters and VM updates. It provides at most 32768 64-bit slots.
+ */
+
+/**
+ * amdgpu_seq64_get_va_base - Get the seq64 va base address
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns:
+ * va base address on success
+ */
+static inline u64 amdgpu_seq64_get_va_base(struct amdgpu_device *adev)
+{
+ u64 addr = AMDGPU_VA_RESERVED_SEQ64_START(adev);
+
+ addr = amdgpu_gmc_sign_extend(addr);
+
+ return addr;
+}
+
+/**
+ * amdgpu_seq64_map - Map the seq64 memory to VM
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: vm pointer
+ * @bo_va: bo_va pointer
+ *
+ * Map the seq64 memory to the given VM.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure
+ */
+int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo_va **bo_va)
+{
+ struct amdgpu_bo *bo;
+ struct drm_exec exec;
+ u64 seq64_addr;
+ int r;
+
+ bo = adev->seq64.sbo;
+ if (!bo)
+ return -EINVAL;
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ if (likely(!r))
+ r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error;
+ }
+
+ *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
+ if (!*bo_va) {
+ r = -ENOMEM;
+ goto error;
+ }
+
+ seq64_addr = amdgpu_seq64_get_va_base(adev) & AMDGPU_GMC_HOLE_MASK;
+
+ r = amdgpu_vm_bo_map(adev, *bo_va, seq64_addr, 0,
+ AMDGPU_VA_RESERVED_SEQ64_SIZE,
+ AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_MTYPE_UC);
+ if (r) {
+ DRM_ERROR("failed to do bo_map on userq sem, err=%d\n", r);
+ amdgpu_vm_bo_del(adev, *bo_va);
+ goto error;
+ }
+
+ r = amdgpu_vm_bo_update(adev, *bo_va, false);
+ if (r) {
+ DRM_ERROR("failed to do vm_bo_update on userq sem\n");
+ amdgpu_vm_bo_del(adev, *bo_va);
+ goto error;
+ }
+
+error:
+ drm_exec_fini(&exec);
+ return r;
+}
+
+/**
+ * amdgpu_seq64_unmap - Unmap the seq64 memory
+ *
+ * @adev: amdgpu_device pointer
+ * @fpriv: DRM file private
+ *
+ * Unmap the seq64 memory from the given VM.
+ */
+void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv)
+{
+ struct amdgpu_vm *vm;
+ struct amdgpu_bo *bo;
+ struct drm_exec exec;
+ int r;
+
+ if (!fpriv->seq64_va)
+ return;
+
+ bo = adev->seq64.sbo;
+ if (!bo)
+ return;
+
+ vm = &fpriv->vm;
+
+ drm_exec_init(&exec, 0, 0);
+ drm_exec_until_all_locked(&exec) {
+ r = amdgpu_vm_lock_pd(vm, &exec, 0);
+ if (likely(!r))
+ r = drm_exec_lock_obj(&exec, &bo->tbo.base);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(r))
+ goto error;
+ }
+
+ amdgpu_vm_bo_del(adev, fpriv->seq64_va);
+
+ fpriv->seq64_va = NULL;
+
+error:
+ drm_exec_fini(&exec);
+}
+
+/**
+ * amdgpu_seq64_alloc - Allocate a 64 bit memory
+ *
+ * @adev: amdgpu_device pointer
+ * @va: VA to access the seq in process address space
+ * @gpu_addr: GPU address to access the seq
+ * @cpu_addr: CPU address to access the seq
+ *
+ * Alloc a 64 bit memory from seq64 pool.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure
+ */
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va,
+ u64 *gpu_addr, u64 **cpu_addr)
+{
+ unsigned long bit_pos;
+
+ bit_pos = find_first_zero_bit(adev->seq64.used, adev->seq64.num_sem);
+ if (bit_pos >= adev->seq64.num_sem)
+ return -ENOSPC;
+
+ __set_bit(bit_pos, adev->seq64.used);
+
+ *va = bit_pos * sizeof(u64) + amdgpu_seq64_get_va_base(adev);
+
+ if (gpu_addr)
+ *gpu_addr = bit_pos * sizeof(u64) + adev->seq64.gpu_addr;
+
+ *cpu_addr = bit_pos + adev->seq64.cpu_base_addr;
+
+ return 0;
+}
+
+/**
+ * amdgpu_seq64_free - Free the given 64 bit memory
+ *
+ * @adev: amdgpu_device pointer
+ * @va: gpu start address to be freed
+ *
+ * Free the given 64 bit memory from seq64 pool.
+ */
+void amdgpu_seq64_free(struct amdgpu_device *adev, u64 va)
+{
+ unsigned long bit_pos;
+
+ bit_pos = (va - amdgpu_seq64_get_va_base(adev)) / sizeof(u64);
+ if (bit_pos < adev->seq64.num_sem)
+ __clear_bit(bit_pos, adev->seq64.used);
+}
+
+/**
+ * amdgpu_seq64_fini - Cleanup seq64 driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Free the memory space allocated for seq64.
+ *
+ */
+void amdgpu_seq64_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->seq64.sbo,
+ NULL,
+ (void **)&adev->seq64.cpu_base_addr);
+}
+
+/**
+ * amdgpu_seq64_init - Initialize seq64 driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate the required memory space for seq64.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure
+ */
+int amdgpu_seq64_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ if (adev->seq64.sbo)
+ return 0;
+
+ /*
+ * Reserve AMDGPU_VA_RESERVED_SEQ64_SIZE bytes, i.e. AMDGPU_MAX_SEQ64_SLOTS
+ * 64-bit slots.
+ */
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_VA_RESERVED_SEQ64_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
+ &adev->seq64.sbo, &adev->seq64.gpu_addr,
+ (void **)&adev->seq64.cpu_base_addr);
+ if (r) {
+ dev_warn(adev->dev, "(%d) create seq64 failed\n", r);
+ return r;
+ }
+
+ memset(adev->seq64.cpu_base_addr, 0, AMDGPU_VA_RESERVED_SEQ64_SIZE);
+
+ adev->seq64.num_sem = AMDGPU_MAX_SEQ64_SLOTS;
+ memset(&adev->seq64.used, 0, sizeof(adev->seq64.used));
+
+ return 0;
+}
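
As a rough illustration of the allocator above, a driver-internal caller could obtain and initialize a slot as sketched below; the wrapper function is an assumption for illustration, only the amdgpu_seq64_*() calls come from this patch.

    /* Illustrative only: allocate one seq64 slot and zero it. */
    static int example_seq64_get_slot(struct amdgpu_device *adev,
                                      u64 *va, u64 *gpu_addr, u64 **cpu_addr)
    {
            int r;

            r = amdgpu_seq64_alloc(adev, va, gpu_addr, cpu_addr);
            if (r)
                    return r;       /* -ENOSPC once all 32768 slots are in use */

            **cpu_addr = 0;         /* CPU view of the freshly allocated slot */
            return 0;
    }

The slot is later returned to the pool with amdgpu_seq64_free(adev, va) once the fence or counter that used it is no longer needed.
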
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
new file mode 100644
index 000000000000..26a249aaaee1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_seq64.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_SEQ64_H__
+#define __AMDGPU_SEQ64_H__
+
+#include "amdgpu_vm.h"
+
+#define AMDGPU_MAX_SEQ64_SLOTS (AMDGPU_VA_RESERVED_SEQ64_SIZE / sizeof(u64))
+
+struct amdgpu_seq64 {
+ struct amdgpu_bo *sbo;
+ u32 num_sem;
+ u64 gpu_addr;
+ u64 *cpu_base_addr;
+ DECLARE_BITMAP(used, AMDGPU_MAX_SEQ64_SLOTS);
+};
+
+void amdgpu_seq64_fini(struct amdgpu_device *adev);
+int amdgpu_seq64_init(struct amdgpu_device *adev);
+int amdgpu_seq64_alloc(struct amdgpu_device *adev, u64 *va, u64 *gpu_addr, u64 **cpu_addr);
+void amdgpu_seq64_free(struct amdgpu_device *adev, u64 gpu_addr);
+int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo_va **bo_va);
+void amdgpu_seq64_unmap(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv);
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
index 1954ceaed439..ec9d12f85f39 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,17 +20,35 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
+#ifndef __AMDGPU_SMUIO_H__
+#define __AMDGPU_SMUIO_H__
-#ifndef _CZ_CLOCK_POWER_GATING_H_
-#define _CZ_CLOCK_POWER_GATING_H_
+enum amdgpu_pkg_type {
+ AMDGPU_PKG_TYPE_APU = 2,
+ AMDGPU_PKG_TYPE_CEM = 3,
+ AMDGPU_PKG_TYPE_OAM = 4,
+ AMDGPU_PKG_TYPE_UNKNOWN,
+};
-#include "cz_hwmgr.h"
-#include "pp_asicblocks.h"
+struct amdgpu_smuio_mcm_config_info {
+ int socket_id;
+ int die_id;
+};
-extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
-extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
-extern int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
-extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
-extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
-#endif /* _CZ_CLOCK_POWER_GATING_H_ */
+struct amdgpu_smuio_funcs {
+ u32 (*get_rom_index_offset)(struct amdgpu_device *adev);
+ u32 (*get_rom_data_offset)(struct amdgpu_device *adev);
+ void (*update_rom_clock_gating)(struct amdgpu_device *adev, bool enable);
+ void (*get_clock_gating_state)(struct amdgpu_device *adev, u64 *flags);
+ u32 (*get_die_id)(struct amdgpu_device *adev);
+ u32 (*get_socket_id)(struct amdgpu_device *adev);
+ enum amdgpu_pkg_type (*get_pkg_type)(struct amdgpu_device *adev);
+ bool (*is_host_gpu_xgmi_supported)(struct amdgpu_device *adev);
+ u64 (*get_gpu_clock_counter)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_smuio {
+ const struct amdgpu_smuio_funcs *funcs;
+};
+
+#endif /* __AMDGPU_SMUIO_H__ */
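
The funcs table above is meant to be filled in by the ASIC-specific smuio IP code. A hedged sketch of how a caller would consume it follows; it assumes the device structure embeds the table as adev->smuio, which this hunk does not show.

    /* Illustrative only: query the socket id through the smuio callback table. */
    static u32 example_smuio_get_socket_id(struct amdgpu_device *adev)
    {
            if (adev->smuio.funcs && adev->smuio.funcs->get_socket_id)
                    return adev->smuio.funcs->get_socket_id(adev);

            return 0;       /* assume socket 0 when the ASIC installs no callback */
    }
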
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_socbb.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_socbb.h
new file mode 100644
index 000000000000..f4176cb01790
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_socbb.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_SOCBB_H__
+#define __AMDGPU_SOCBB_H__
+
+struct gpu_info_voltage_scaling_v1_0 {
+ uint32_t state;
+ uint32_t dscclk_mhz;
+ uint32_t dcfclk_mhz;
+ uint32_t socclk_mhz;
+ uint32_t dram_speed_mts;
+ uint32_t fabricclk_mhz;
+ uint32_t dispclk_mhz;
+ uint32_t phyclk_mhz;
+ uint32_t dppclk_mhz;
+};
+
+struct gpu_info_soc_bounding_box_v1_0 {
+ uint32_t sr_exit_time_us;
+ uint32_t sr_enter_plus_exit_time_us;
+ uint32_t urgent_latency_us;
+ uint32_t urgent_latency_pixel_data_only_us;
+ uint32_t urgent_latency_pixel_mixed_with_vm_data_us;
+ uint32_t urgent_latency_vm_data_only_us;
+ uint32_t writeback_latency_us;
+ uint32_t ideal_dram_bw_after_urgent_percent;
+ uint32_t pct_ideal_dram_sdp_bw_after_urgent_pixel_only; // PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly
+ uint32_t pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm;
+ uint32_t pct_ideal_dram_sdp_bw_after_urgent_vm_only;
+ uint32_t max_avg_sdp_bw_use_normal_percent;
+ uint32_t max_avg_dram_bw_use_normal_percent;
+ uint32_t max_request_size_bytes;
+ uint32_t downspread_percent;
+ uint32_t dram_page_open_time_ns;
+ uint32_t dram_rw_turnaround_time_ns;
+ uint32_t dram_return_buffer_per_channel_bytes;
+ uint32_t dram_channel_width_bytes;
+ uint32_t fabric_datapath_to_dcn_data_return_bytes;
+ uint32_t dcn_downspread_percent;
+ uint32_t dispclk_dppclk_vco_speed_mhz;
+ uint32_t dfs_vco_period_ps;
+ uint32_t urgent_out_of_order_return_per_channel_pixel_only_bytes;
+ uint32_t urgent_out_of_order_return_per_channel_pixel_and_vm_bytes;
+ uint32_t urgent_out_of_order_return_per_channel_vm_only_bytes;
+ uint32_t round_trip_ping_latency_dcfclk_cycles;
+ uint32_t urgent_out_of_order_return_per_channel_bytes;
+ uint32_t channel_interleave_bytes;
+ uint32_t num_banks;
+ uint32_t num_chans;
+ uint32_t vmm_page_size_bytes;
+ uint32_t dram_clock_change_latency_us;
+ uint32_t writeback_dram_clock_change_latency_us;
+ uint32_t return_bus_width_bytes;
+ uint32_t voltage_override;
+ uint32_t xfc_bus_transport_time_us;
+ uint32_t xfc_xbuf_latency_tolerance_us;
+ uint32_t use_urgent_burst_bw;
+ uint32_t num_states;
+ struct gpu_info_voltage_scaling_v1_0 clock_limits[8];
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index ed814e6d0207..d6ae9974c952 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: MIT
/*
* Copyright 2014 Advanced Micro Devices, Inc.
* All Rights Reserved.
@@ -28,9 +29,11 @@
* Christian König <christian.koenig@amd.com>
*/
-#include <drm/drmP.h>
+#include <linux/dma-fence-chain.h>
+
#include "amdgpu.h"
#include "amdgpu_trace.h"
+#include "amdgpu_amdkfd.h"
struct amdgpu_sync_entry {
struct hlist_node node;
@@ -49,7 +52,6 @@ static struct kmem_cache *amdgpu_sync_slab;
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
hash_init(sync->fences);
- sync->last_vm_update = NULL;
}
/**
@@ -63,7 +65,7 @@ void amdgpu_sync_create(struct amdgpu_sync *sync)
static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
struct dma_fence *f)
{
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
if (s_fence) {
struct amdgpu_ring *ring;
@@ -78,17 +80,26 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
/**
* amdgpu_sync_get_owner - extract the owner of a fence
*
- * @fence: fence get the owner from
+ * @f: fence to get the owner from
*
* Extract who originally created the fence.
*/
static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *s_fence;
+ struct amdgpu_amdkfd_fence *kfd_fence;
+
+ if (!f)
+ return AMDGPU_FENCE_OWNER_UNDEFINED;
+ s_fence = to_drm_sched_fence(f);
if (s_fence)
return s_fence->owner;
+ kfd_fence = to_amdgpu_amdkfd_fence(f);
+ if (kfd_fence)
+ return AMDGPU_FENCE_OWNER_KFD;
+
return AMDGPU_FENCE_OWNER_UNDEFINED;
}
@@ -124,11 +135,16 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
struct amdgpu_sync_entry *e;
hash_for_each_possible(sync->fences, e, node, f->context) {
- if (unlikely(e->fence->context != f->context))
- continue;
+ if (dma_fence_is_signaled(e->fence)) {
+ dma_fence_put(e->fence);
+ e->fence = dma_fence_get(f);
+ return true;
+ }
- amdgpu_sync_keep_later(&e->fence, f);
- return true;
+ if (likely(e->fence->context == f->context)) {
+ amdgpu_sync_keep_later(&e->fence, f);
+ return true;
+ }
}
return false;
}
@@ -137,25 +153,23 @@ static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
* amdgpu_sync_fence - remember to sync to this fence
*
* @sync: sync object to add fence to
- * @fence: fence to sync to
+ * @f: fence to sync to
+ * @flags: memory allocation flags to use when allocating sync entry
*
+ * Add the fence to the sync object.
*/
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct dma_fence *f)
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+ gfp_t flags)
{
struct amdgpu_sync_entry *e;
if (!f)
return 0;
- if (amdgpu_sync_same_dev(adev, f) &&
- amdgpu_sync_get_owner(f) == AMDGPU_FENCE_OWNER_VM)
- amdgpu_sync_keep_later(&sync->last_vm_update, f);
-
if (amdgpu_sync_add_later(sync, f))
return 0;
- e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(amdgpu_sync_slab, flags);
if (!e)
return -ENOMEM;
@@ -164,66 +178,132 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
return 0;
}
+/* Determine based on the owner and mode if we should sync to a fence or not */
+static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
+ enum amdgpu_sync_mode mode,
+ void *owner, struct dma_fence *f)
+{
+ void *fence_owner = amdgpu_sync_get_owner(f);
+
+ /* Always sync to moves, no matter what */
+ if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED)
+ return true;
+
+ /* We only want to trigger KFD eviction fences on
+ * evict or move jobs. Skip KFD fences otherwise.
+ */
+ if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
+ owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+ return false;
+
+ /* Never sync to VM updates either. */
+ if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
+ owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
+ owner != AMDGPU_FENCE_OWNER_KFD)
+ return false;
+
+ /* Ignore fences depending on the sync mode */
+ switch (mode) {
+ case AMDGPU_SYNC_ALWAYS:
+ return true;
+
+ case AMDGPU_SYNC_NE_OWNER:
+ if (amdgpu_sync_same_dev(adev, f) &&
+ fence_owner == owner)
+ return false;
+ break;
+
+ case AMDGPU_SYNC_EQ_OWNER:
+ if (amdgpu_sync_same_dev(adev, f) &&
+ fence_owner != owner)
+ return false;
+ break;
+
+ case AMDGPU_SYNC_EXPLICIT:
+ return false;
+ }
+
+ WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD,
+ "Adding eviction fence to sync obj");
+ return true;
+}
+
/**
* amdgpu_sync_resv - sync to a reservation object
*
+ * @adev: amdgpu device
* @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence
- * @shared: true if we should only sync to the exclusive fence
+ * @mode: how owner affects which fences we sync to
+ * @owner: owner of the planned job submission
*
* Sync to the fence
*/
-int amdgpu_sync_resv(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct reservation_object *resv,
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_resv *resv, enum amdgpu_sync_mode mode,
void *owner)
{
- struct reservation_object_list *flist;
+ struct dma_resv_iter cursor;
struct dma_fence *f;
- void *fence_owner;
- unsigned i;
- int r = 0;
+ int r;
if (resv == NULL)
return -EINVAL;
+ /* Implicitly sync only to KERNEL, WRITE and READ */
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) {
+ dma_fence_chain_for_each(f, f) {
+ struct dma_fence *tmp = dma_fence_chain_contained(f);
+
+ if (amdgpu_sync_test_fence(adev, mode, owner, tmp)) {
+ r = amdgpu_sync_fence(sync, f, GFP_KERNEL);
+ dma_fence_put(f);
+ if (r)
+ return r;
+ break;
+ }
+ }
+ }
+ return 0;
+}
- /* always sync to the exclusive fence */
- f = reservation_object_get_excl(resv);
- r = amdgpu_sync_fence(adev, sync, f);
+/**
+ * amdgpu_sync_kfd - sync to KFD fences
+ *
+ * @sync: sync object to add KFD fences to
+ * @resv: reservation object with KFD fences
+ *
+ * Extract all KFD fences and add them to the sync object.
+ */
+int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *f;
+ int r = 0;
- flist = reservation_object_get_list(resv);
- if (!flist || r)
- return r;
+ dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, f) {
+ void *fence_owner = amdgpu_sync_get_owner(f);
- for (i = 0; i < flist->shared_count; ++i) {
- f = rcu_dereference_protected(flist->shared[i],
- reservation_object_held(resv));
- if (amdgpu_sync_same_dev(adev, f)) {
- /* VM updates are only interesting
- * for other VM updates and moves.
- */
- fence_owner = amdgpu_sync_get_owner(f);
- if ((owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
- (fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED) &&
- ((owner == AMDGPU_FENCE_OWNER_VM) !=
- (fence_owner == AMDGPU_FENCE_OWNER_VM)))
- continue;
-
- /* Ignore fence from the same owner as
- * long as it isn't undefined.
- */
- if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
- fence_owner == owner)
- continue;
- }
+ if (fence_owner != AMDGPU_FENCE_OWNER_KFD)
+ continue;
- r = amdgpu_sync_fence(adev, sync, f);
+ r = amdgpu_sync_fence(sync, f, GFP_KERNEL);
if (r)
break;
}
+ dma_resv_iter_end(&cursor);
+
return r;
}
+/* Free the entry back to the slab */
+static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
+{
+ hash_del(&e->node);
+ dma_fence_put(e->fence);
+ kmem_cache_free(amdgpu_sync_slab, e);
+}
+
/**
* amdgpu_sync_peek_fence - get the next fence not signaled yet
*
@@ -242,8 +322,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
hash_for_each_safe(sync->fences, i, tmp, e, node) {
struct dma_fence *f = e->fence;
- struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
+ struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
+ if (dma_fence_is_signaled(f)) {
+ amdgpu_sync_entry_free(e);
+ continue;
+ }
if (ring && s_fence) {
/* For fences from the same ring it is sufficient
* when they are scheduled.
@@ -256,13 +340,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
}
}
- if (dma_fence_is_signaled(f)) {
- hash_del(&e->node);
- dma_fence_put(f);
- kmem_cache_free(amdgpu_sync_slab, e);
- continue;
- }
-
return f;
}
@@ -299,6 +376,103 @@ struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
}
/**
+ * amdgpu_sync_clone - clone a sync object
+ *
+ * @source: sync object to clone
+ * @clone: pointer to destination sync object
+ *
+ * Adds references to all unsignaled fences in @source to @clone. Also
+ * removes signaled fences from @source while at it.
+ */
+int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
+{
+ struct amdgpu_sync_entry *e;
+ struct hlist_node *tmp;
+ struct dma_fence *f;
+ int i, r;
+
+ hash_for_each_safe(source->fences, i, tmp, e, node) {
+ f = e->fence;
+ if (!dma_fence_is_signaled(f)) {
+ r = amdgpu_sync_fence(clone, f, GFP_KERNEL);
+ if (r)
+ return r;
+ } else {
+ amdgpu_sync_entry_free(e);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_sync_move - move all fences from src to dst
+ *
+ * @src: source of the fences, empty after function
+ * @dst: destination for the fences
+ *
+ * Moves all fences from source to destination. All fences in destination are
+ * freed and source is empty after the function call.
+ */
+void amdgpu_sync_move(struct amdgpu_sync *src, struct amdgpu_sync *dst)
+{
+ unsigned int i;
+
+ amdgpu_sync_free(dst);
+
+ for (i = 0; i < HASH_SIZE(src->fences); ++i)
+ hlist_move_list(&src->fences[i], &dst->fences[i]);
+}
+
+/**
+ * amdgpu_sync_push_to_job - push fences into job
+ * @sync: sync object to get the fences from
+ * @job: job to push the fences into
+ *
+ * Add all unsignaled fences from sync to job.
+ */
+int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
+{
+ struct amdgpu_sync_entry *e;
+ struct hlist_node *tmp;
+ struct dma_fence *f;
+ int i, r;
+
+ hash_for_each_safe(sync->fences, i, tmp, e, node) {
+ f = e->fence;
+ if (dma_fence_is_signaled(f)) {
+ amdgpu_sync_entry_free(e);
+ continue;
+ }
+
+ dma_fence_get(f);
+ r = drm_sched_job_add_dependency(&job->base, f);
+ if (r) {
+ dma_fence_put(f);
+ return r;
+ }
+ }
+ return 0;
+}
+
+int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
+{
+ struct amdgpu_sync_entry *e;
+ struct hlist_node *tmp;
+ int i, r;
+
+ hash_for_each_safe(sync->fences, i, tmp, e, node) {
+ r = dma_fence_wait(e->fence, intr);
+ if (r)
+ return r;
+
+ amdgpu_sync_entry_free(e);
+ }
+
+ return 0;
+}
+
+/**
* amdgpu_sync_free - free the sync object
*
* @sync: sync object to use
@@ -309,15 +483,10 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
{
struct amdgpu_sync_entry *e;
struct hlist_node *tmp;
- unsigned i;
-
- hash_for_each_safe(sync->fences, i, tmp, e, node) {
- hash_del(&e->node);
- dma_fence_put(e->fence);
- kmem_cache_free(amdgpu_sync_slab, e);
- }
+ unsigned int i;
- dma_fence_put(sync->last_vm_update);
+ hash_for_each_safe(sync->fences, i, tmp, e, node)
+ amdgpu_sync_entry_free(e);
}
/**
@@ -327,9 +496,7 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
*/
int amdgpu_sync_init(void)
{
- amdgpu_sync_slab = kmem_cache_create(
- "amdgpu_sync", sizeof(struct amdgpu_sync_entry), 0,
- SLAB_HWCACHE_ALIGN, NULL);
+ amdgpu_sync_slab = KMEM_CACHE(amdgpu_sync_entry, SLAB_HWCACHE_ALIGN);
if (!amdgpu_sync_slab)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index 605be266e07f..51eb4382c91e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -27,28 +27,39 @@
#include <linux/hashtable.h>
struct dma_fence;
-struct reservation_object;
+struct dma_resv;
struct amdgpu_device;
struct amdgpu_ring;
+struct amdgpu_job;
+
+enum amdgpu_sync_mode {
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_SYNC_NE_OWNER,
+ AMDGPU_SYNC_EQ_OWNER,
+ AMDGPU_SYNC_EXPLICIT
+};
/*
* Container for fences used to sync command submissions.
*/
struct amdgpu_sync {
DECLARE_HASHTABLE(fences, 4);
- struct dma_fence *last_vm_update;
};
void amdgpu_sync_create(struct amdgpu_sync *sync);
-int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
- struct dma_fence *f);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct reservation_object *resv,
+int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
+ gfp_t flags);
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_resv *resv, enum amdgpu_sync_mode mode,
void *owner);
+int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
+void amdgpu_sync_move(struct amdgpu_sync *src, struct amdgpu_sync *dst);
+int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job);
+int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
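
Taken together, the reworked interface above is used roughly as sketched below; the wrapper and its arguments are assumptions for illustration, only the amdgpu_sync_*() calls and AMDGPU_SYNC_NE_OWNER come from this patch.

    /* Illustrative only: collect implicit fences and hand them to a job. */
    static int example_sync_job_deps(struct amdgpu_device *adev,
                                     struct amdgpu_job *job,
                                     struct dma_resv *resv, void *owner)
    {
            struct amdgpu_sync sync;
            int r;

            amdgpu_sync_create(&sync);

            /* Pull the relevant fences out of the reservation object. */
            r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_SYNC_NE_OWNER, owner);
            if (r)
                    goto out;

            /* Every still-unsignaled fence becomes a scheduler dependency. */
            r = amdgpu_sync_push_to_job(&sync, job);

    out:
            amdgpu_sync_free(&sync);
            return r;
    }
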
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
deleted file mode 100644
index 15510dadde01..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright 2009 VMware, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Michel Dänzer
- */
-#include <drm/drmP.h>
-#include <drm/amdgpu_drm.h>
-#include "amdgpu.h"
-#include "amdgpu_uvd.h"
-#include "amdgpu_vce.h"
-
-/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
-static void amdgpu_do_test_moves(struct amdgpu_device *adev)
-{
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- struct amdgpu_bo *vram_obj = NULL;
- struct amdgpu_bo **gtt_obj = NULL;
- uint64_t gtt_addr, vram_addr;
- unsigned n, size;
- int i, r;
-
- size = 1024 * 1024;
-
- /* Number of tests =
- * (Total GTT - IB pool - writeback page - ring buffers) / test size
- */
- n = adev->mc.gtt_size - AMDGPU_IB_POOL_SIZE*64*1024;
- for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
- if (adev->rings[i])
- n -= adev->rings[i]->ring_size;
- if (adev->wb.wb_obj)
- n -= AMDGPU_GPU_PAGE_SIZE;
- if (adev->irq.ih.ring_obj)
- n -= adev->irq.ih.ring_size;
- n /= size;
-
- gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
- if (!gtt_obj) {
- DRM_ERROR("Failed to allocate %d pointers\n", n);
- r = 1;
- goto out_cleanup;
- }
-
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- NULL, NULL, &vram_obj);
- if (r) {
- DRM_ERROR("Failed to create VRAM object\n");
- goto out_cleanup;
- }
- r = amdgpu_bo_reserve(vram_obj, false);
- if (unlikely(r != 0))
- goto out_unref;
- r = amdgpu_bo_pin(vram_obj, AMDGPU_GEM_DOMAIN_VRAM, &vram_addr);
- if (r) {
- DRM_ERROR("Failed to pin VRAM object\n");
- goto out_unres;
- }
- for (i = 0; i < n; i++) {
- void *gtt_map, *vram_map;
- void **gtt_start, **gtt_end;
- void **vram_start, **vram_end;
- struct dma_fence *fence = NULL;
-
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
- NULL, gtt_obj + i);
- if (r) {
- DRM_ERROR("Failed to create GTT object %d\n", i);
- goto out_lclean;
- }
-
- r = amdgpu_bo_reserve(gtt_obj[i], false);
- if (unlikely(r != 0))
- goto out_lclean_unref;
- r = amdgpu_bo_pin(gtt_obj[i], AMDGPU_GEM_DOMAIN_GTT, &gtt_addr);
- if (r) {
- DRM_ERROR("Failed to pin GTT object %d\n", i);
- goto out_lclean_unres;
- }
-
- r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
- if (r) {
- DRM_ERROR("Failed to map GTT object %d\n", i);
- goto out_lclean_unpin;
- }
-
- for (gtt_start = gtt_map, gtt_end = gtt_map + size;
- gtt_start < gtt_end;
- gtt_start++)
- *gtt_start = gtt_start;
-
- amdgpu_bo_kunmap(gtt_obj[i]);
-
- r = amdgpu_copy_buffer(ring, gtt_addr, vram_addr,
- size, NULL, &fence, false);
-
- if (r) {
- DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
- goto out_lclean_unpin;
- }
-
- r = dma_fence_wait(fence, false);
- if (r) {
- DRM_ERROR("Failed to wait for GTT->VRAM fence %d\n", i);
- goto out_lclean_unpin;
- }
-
- dma_fence_put(fence);
-
- r = amdgpu_bo_kmap(vram_obj, &vram_map);
- if (r) {
- DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
- goto out_lclean_unpin;
- }
-
- for (gtt_start = gtt_map, gtt_end = gtt_map + size,
- vram_start = vram_map, vram_end = vram_map + size;
- vram_start < vram_end;
- gtt_start++, vram_start++) {
- if (*vram_start != gtt_start) {
- DRM_ERROR("Incorrect GTT->VRAM copy %d: Got 0x%p, "
- "expected 0x%p (GTT/VRAM offset "
- "0x%16llx/0x%16llx)\n",
- i, *vram_start, gtt_start,
- (unsigned long long)
- (gtt_addr - adev->mc.gtt_start +
- (void*)gtt_start - gtt_map),
- (unsigned long long)
- (vram_addr - adev->mc.vram_start +
- (void*)gtt_start - gtt_map));
- amdgpu_bo_kunmap(vram_obj);
- goto out_lclean_unpin;
- }
- *vram_start = vram_start;
- }
-
- amdgpu_bo_kunmap(vram_obj);
-
- r = amdgpu_copy_buffer(ring, vram_addr, gtt_addr,
- size, NULL, &fence, false);
-
- if (r) {
- DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
- goto out_lclean_unpin;
- }
-
- r = dma_fence_wait(fence, false);
- if (r) {
- DRM_ERROR("Failed to wait for VRAM->GTT fence %d\n", i);
- goto out_lclean_unpin;
- }
-
- dma_fence_put(fence);
-
- r = amdgpu_bo_kmap(gtt_obj[i], &gtt_map);
- if (r) {
- DRM_ERROR("Failed to map GTT object after copy %d\n", i);
- goto out_lclean_unpin;
- }
-
- for (gtt_start = gtt_map, gtt_end = gtt_map + size,
- vram_start = vram_map, vram_end = vram_map + size;
- gtt_start < gtt_end;
- gtt_start++, vram_start++) {
- if (*gtt_start != vram_start) {
- DRM_ERROR("Incorrect VRAM->GTT copy %d: Got 0x%p, "
- "expected 0x%p (VRAM/GTT offset "
- "0x%16llx/0x%16llx)\n",
- i, *gtt_start, vram_start,
- (unsigned long long)
- (vram_addr - adev->mc.vram_start +
- (void*)vram_start - vram_map),
- (unsigned long long)
- (gtt_addr - adev->mc.gtt_start +
- (void*)vram_start - vram_map));
- amdgpu_bo_kunmap(gtt_obj[i]);
- goto out_lclean_unpin;
- }
- }
-
- amdgpu_bo_kunmap(gtt_obj[i]);
-
- DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
- gtt_addr - adev->mc.gtt_start);
- continue;
-
-out_lclean_unpin:
- amdgpu_bo_unpin(gtt_obj[i]);
-out_lclean_unres:
- amdgpu_bo_unreserve(gtt_obj[i]);
-out_lclean_unref:
- amdgpu_bo_unref(&gtt_obj[i]);
-out_lclean:
- for (--i; i >= 0; --i) {
- amdgpu_bo_unpin(gtt_obj[i]);
- amdgpu_bo_unreserve(gtt_obj[i]);
- amdgpu_bo_unref(&gtt_obj[i]);
- }
- if (fence)
- dma_fence_put(fence);
- break;
- }
-
- amdgpu_bo_unpin(vram_obj);
-out_unres:
- amdgpu_bo_unreserve(vram_obj);
-out_unref:
- amdgpu_bo_unref(&vram_obj);
-out_cleanup:
- kfree(gtt_obj);
- if (r) {
- pr_warn("Error while testing BO move\n");
- }
-}
-
-void amdgpu_test_moves(struct amdgpu_device *adev)
-{
- if (adev->mman.buffer_funcs)
- amdgpu_do_test_moves(adev);
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 8601904e670a..d13e64a69e25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -1,12 +1,33 @@
-#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _AMDGPU_TRACE_H_
#include <linux/stringify.h>
#include <linux/types.h>
#include <linux/tracepoint.h>
-#include <drm/drmP.h>
-
#undef TRACE_SYSTEM
#define TRACE_SYSTEM amdgpu
#define TRACE_INCLUDE_FILE amdgpu_trace
@@ -14,7 +35,7 @@
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
-TRACE_EVENT(amdgpu_mm_rreg,
+TRACE_EVENT(amdgpu_device_rreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
TP_STRUCT__entry(
@@ -33,7 +54,7 @@ TRACE_EVENT(amdgpu_mm_rreg,
(unsigned long)__entry->value)
);
-TRACE_EVENT(amdgpu_mm_wreg,
+TRACE_EVENT(amdgpu_device_wreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
TP_STRUCT__entry(
@@ -53,37 +74,40 @@ TRACE_EVENT(amdgpu_mm_wreg,
);
TRACE_EVENT(amdgpu_iv,
- TP_PROTO(struct amdgpu_iv_entry *iv),
- TP_ARGS(iv),
+ TP_PROTO(unsigned ih, struct amdgpu_iv_entry *iv),
+ TP_ARGS(ih, iv),
TP_STRUCT__entry(
+ __field(unsigned, ih)
__field(unsigned, client_id)
__field(unsigned, src_id)
__field(unsigned, ring_id)
- __field(unsigned, vm_id)
- __field(unsigned, vm_id_src)
+ __field(unsigned, vmid)
+ __field(unsigned, vmid_src)
__field(uint64_t, timestamp)
__field(unsigned, timestamp_src)
- __field(unsigned, pas_id)
+ __field(unsigned, pasid)
__array(unsigned, src_data, 4)
),
TP_fast_assign(
+ __entry->ih = ih;
__entry->client_id = iv->client_id;
__entry->src_id = iv->src_id;
__entry->ring_id = iv->ring_id;
- __entry->vm_id = iv->vm_id;
- __entry->vm_id_src = iv->vm_id_src;
+ __entry->vmid = iv->vmid;
+ __entry->vmid_src = iv->vmid_src;
__entry->timestamp = iv->timestamp;
__entry->timestamp_src = iv->timestamp_src;
- __entry->pas_id = iv->pas_id;
+ __entry->pasid = iv->pasid;
__entry->src_data[0] = iv->src_data[0];
__entry->src_data[1] = iv->src_data[1];
__entry->src_data[2] = iv->src_data[2];
__entry->src_data[3] = iv->src_data[3];
),
- TP_printk("client_id:%u src_id:%u ring:%u vm_id:%u timestamp: %llu pas_id:%u src_data: %08x %08x %08x %08x\n",
- __entry->client_id, __entry->src_id,
- __entry->ring_id, __entry->vm_id,
- __entry->timestamp, __entry->pas_id,
+ TP_printk("ih:%u client_id:%u src_id:%u ring:%u vmid:%u "
+ "timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x",
+ __entry->ih, __entry->client_id, __entry->src_id,
+ __entry->ring_id, __entry->vmid,
+ __entry->timestamp, __entry->pasid,
__entry->src_data[0], __entry->src_data[1],
__entry->src_data[2], __entry->src_data[3])
);
@@ -103,21 +127,23 @@ TRACE_EVENT(amdgpu_bo_create,
TP_fast_assign(
__entry->bo = bo;
- __entry->pages = bo->tbo.num_pages;
- __entry->type = bo->tbo.mem.mem_type;
- __entry->prefer = bo->prefered_domains;
+ __entry->pages = PFN_UP(bo->tbo.resource->size);
+ __entry->type = bo->tbo.resource->mem_type;
+ __entry->prefer = bo->preferred_domains;
__entry->allow = bo->allowed_domains;
__entry->visible = bo->flags;
),
- TP_printk("bo=%p, pages=%u, type=%d, prefered=%d, allowed=%d, visible=%d",
+ TP_printk("bo=%p, pages=%u, type=%d, preferred=%d, allowed=%d, visible=%d",
__entry->bo, __entry->pages, __entry->type,
__entry->prefer, __entry->allow, __entry->visible)
);
TRACE_EVENT(amdgpu_cs,
- TP_PROTO(struct amdgpu_cs_parser *p, int i),
- TP_ARGS(p, i),
+ TP_PROTO(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib),
+ TP_ARGS(p, job, ib),
TP_STRUCT__entry(
__field(struct amdgpu_bo_list *, bo_list)
__field(u32, ring)
@@ -127,10 +153,10 @@ TRACE_EVENT(amdgpu_cs,
TP_fast_assign(
__entry->bo_list = p->bo_list;
- __entry->ring = p->job->ring->idx;
- __entry->dw = p->job->ibs[i].length_dw;
+ __entry->ring = to_amdgpu_ring(job->base.entity->rq->sched)->idx;
+ __entry->dw = ib->length_dw;
__entry->fences = amdgpu_fence_count_emitted(
- p->job->ring);
+ to_amdgpu_ring(job->base.entity->rq->sched));
),
TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
__entry->bo_list, __entry->ring, __entry->dw,
@@ -141,51 +167,47 @@ TRACE_EVENT(amdgpu_cs_ioctl,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
+ __field(u64, context)
+ __field(u64, seqno)
__field(struct dma_fence *, fence)
- __field(char *, ring_name)
+ __string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
- __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+ __assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = job->ring->name;
+ __assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
- __entry->seqno, __entry->ring_name, __entry->num_ibs)
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
+ __entry->seqno, __get_str(ring), __entry->num_ibs)
);
TRACE_EVENT(amdgpu_sched_run_job,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
- __field(char *, ring_name)
+ __field(u64, context)
+ __field(u64, seqno)
+ __string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
- __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
+ __assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
- __entry->ring_name = job->ring->name;
+ __assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
- __entry->seqno, __entry->ring_name, __entry->num_ibs)
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
+ __entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -194,24 +216,25 @@ TRACE_EVENT(amdgpu_vm_grab_id,
struct amdgpu_job *job),
TP_ARGS(vm, ring, job),
TP_STRUCT__entry(
- __field(struct amdgpu_vm *, vm)
+ __field(u32, pasid)
+ __string(ring, ring->name)
__field(u32, ring)
- __field(u32, vm_id)
+ __field(u32, vmid)
__field(u32, vm_hub)
__field(u64, pd_addr)
__field(u32, needs_flush)
),
TP_fast_assign(
- __entry->vm = vm;
- __entry->ring = ring->idx;
- __entry->vm_id = job->vm_id;
- __entry->vm_hub = ring->funcs->vmhub,
+ __entry->pasid = vm->pasid;
+ __assign_str(ring);
+ __entry->vmid = job->vmid;
+ __entry->vm_hub = ring->vm_hub,
__entry->pd_addr = job->vm_pd_addr;
__entry->needs_flush = job->vm_needs_flush;
),
- TP_printk("vm=%p, ring=%u, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
- __entry->vm, __entry->ring, __entry->vm_id,
+ TP_printk("pasid=%d, ring=%s, id=%u, hub=%u, pd_addr=%010Lx needs_flush=%u",
+ __entry->pasid, __get_str(ring), __entry->vmid,
__entry->vm_hub, __entry->pd_addr, __entry->needs_flush)
);
@@ -224,17 +247,17 @@ TRACE_EVENT(amdgpu_vm_bo_map,
__field(long, start)
__field(long, last)
__field(u64, offset)
- __field(u32, flags)
+ __field(u64, flags)
),
TP_fast_assign(
- __entry->bo = bo_va ? bo_va->bo : NULL;
+ __entry->bo = bo_va ? bo_va->base.bo : NULL;
__entry->start = mapping->start;
__entry->last = mapping->last;
__entry->offset = mapping->offset;
__entry->flags = mapping->flags;
),
- TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
+ TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx",
__entry->bo, __entry->start, __entry->last,
__entry->offset, __entry->flags)
);
@@ -248,17 +271,17 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
__field(long, start)
__field(long, last)
__field(u64, offset)
- __field(u32, flags)
+ __field(u64, flags)
),
TP_fast_assign(
- __entry->bo = bo_va->bo;
+ __entry->bo = bo_va ? bo_va->base.bo : NULL;
__entry->start = mapping->start;
__entry->last = mapping->last;
__entry->offset = mapping->offset;
__entry->flags = mapping->flags;
),
- TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x",
+ TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%llx",
__entry->bo, __entry->start, __entry->last,
__entry->offset, __entry->flags)
);
@@ -269,7 +292,7 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
TP_STRUCT__entry(
__field(u64, soffset)
__field(u64, eoffset)
- __field(u32, flags)
+ __field(u64, flags)
),
TP_fast_assign(
@@ -277,7 +300,7 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
__entry->eoffset = mapping->last + 1;
__entry->flags = mapping->flags;
),
- TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
+ TP_printk("soffs=%010llx, eoffs=%010llx, flags=%llx",
__entry->soffset, __entry->eoffset, __entry->flags)
);
@@ -291,16 +314,65 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
TP_ARGS(mapping)
);
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
+ TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+ TP_ARGS(mapping)
+);
+
+TRACE_EVENT(amdgpu_vm_update_ptes,
+ TP_PROTO(struct amdgpu_vm_update_params *p,
+ uint64_t start, uint64_t end,
+ unsigned int nptes, uint64_t dst,
+ uint64_t incr, uint64_t flags,
+ pid_t pid, uint64_t vm_ctx),
+ TP_ARGS(p, start, end, nptes, dst, incr, flags, pid, vm_ctx),
+ TP_STRUCT__entry(
+ __field(u64, start)
+ __field(u64, end)
+ __field(u64, flags)
+ __field(unsigned int, nptes)
+ __field(u64, incr)
+ __field(pid_t, pid)
+ __field(u64, vm_ctx)
+ __dynamic_array(u64, dst, nptes)
+ ),
+
+ TP_fast_assign(
+ unsigned int i;
+
+ __entry->start = start;
+ __entry->end = end;
+ __entry->flags = flags;
+ __entry->incr = incr;
+ __entry->nptes = nptes;
+ __entry->pid = pid;
+ __entry->vm_ctx = vm_ctx;
+ for (i = 0; i < nptes; ++i) {
+ u64 addr = p->pages_addr ? amdgpu_vm_map_gart(
+ p->pages_addr, dst) : dst;
+
+ ((u64 *)__get_dynamic_array(dst))[i] = addr;
+ dst += incr;
+ }
+ ),
+ TP_printk("pid:%u vm_ctx:0x%llx start:0x%010llx end:0x%010llx,"
+ " flags:0x%llx, incr:%llu, dst:\n%s", __entry->pid,
+ __entry->vm_ctx, __entry->start, __entry->end,
+ __entry->flags, __entry->incr, __print_array(
+ __get_dynamic_array(dst), __entry->nptes, 8))
+);
+
TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
- uint32_t incr, uint32_t flags),
- TP_ARGS(pe, addr, count, incr, flags),
+ uint32_t incr, uint64_t flags, bool immediate),
+ TP_ARGS(pe, addr, count, incr, flags, immediate),
TP_STRUCT__entry(
__field(u64, pe)
__field(u64, addr)
__field(u32, count)
__field(u32, incr)
- __field(u32, flags)
+ __field(u64, flags)
+ __field(bool, immediate)
),
TP_fast_assign(
@@ -309,50 +381,108 @@ TRACE_EVENT(amdgpu_vm_set_ptes,
__entry->count = count;
__entry->incr = incr;
__entry->flags = flags;
+ __entry->immediate = immediate;
),
- TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%08x, count=%u",
- __entry->pe, __entry->addr, __entry->incr,
- __entry->flags, __entry->count)
+ TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u, "
+ "immediate=%d", __entry->pe, __entry->addr, __entry->incr,
+ __entry->flags, __entry->count, __entry->immediate)
);
TRACE_EVENT(amdgpu_vm_copy_ptes,
- TP_PROTO(uint64_t pe, uint64_t src, unsigned count),
- TP_ARGS(pe, src, count),
+ TP_PROTO(uint64_t pe, uint64_t src, unsigned count, bool immediate),
+ TP_ARGS(pe, src, count, immediate),
TP_STRUCT__entry(
__field(u64, pe)
__field(u64, src)
__field(u32, count)
+ __field(bool, immediate)
),
TP_fast_assign(
__entry->pe = pe;
__entry->src = src;
__entry->count = count;
+ __entry->immediate = immediate;
),
- TP_printk("pe=%010Lx, src=%010Lx, count=%u",
- __entry->pe, __entry->src, __entry->count)
+ TP_printk("pe=%010Lx, src=%010Lx, count=%u, immediate=%d",
+ __entry->pe, __entry->src, __entry->count,
+ __entry->immediate)
);
TRACE_EVENT(amdgpu_vm_flush,
- TP_PROTO(struct amdgpu_ring *ring, unsigned vm_id,
+ TP_PROTO(struct amdgpu_ring *ring, unsigned vmid,
uint64_t pd_addr),
- TP_ARGS(ring, vm_id, pd_addr),
+ TP_ARGS(ring, vmid, pd_addr),
TP_STRUCT__entry(
- __field(u32, ring)
- __field(u32, vm_id)
+ __string(ring, ring->name)
+ __field(u32, vmid)
__field(u32, vm_hub)
__field(u64, pd_addr)
),
TP_fast_assign(
- __entry->ring = ring->idx;
- __entry->vm_id = vm_id;
- __entry->vm_hub = ring->funcs->vmhub;
+ __assign_str(ring);
+ __entry->vmid = vmid;
+ __entry->vm_hub = ring->vm_hub;
__entry->pd_addr = pd_addr;
),
- TP_printk("ring=%u, id=%u, hub=%u, pd_addr=%010Lx",
- __entry->ring, __entry->vm_id,
- __entry->vm_hub,__entry->pd_addr)
+ TP_printk("ring=%s, id=%u, hub=%u, pd_addr=%010Lx",
+ __get_str(ring), __entry->vmid,
+ __entry->vm_hub, __entry->pd_addr)
+);
+
+DECLARE_EVENT_CLASS(amdgpu_pasid,
+ TP_PROTO(unsigned pasid),
+ TP_ARGS(pasid),
+ TP_STRUCT__entry(
+ __field(unsigned, pasid)
+ ),
+ TP_fast_assign(
+ __entry->pasid = pasid;
+ ),
+ TP_printk("pasid=%u", __entry->pasid)
+);
+
+DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_allocated,
+ TP_PROTO(unsigned pasid),
+ TP_ARGS(pasid)
+);
+
+DEFINE_EVENT(amdgpu_pasid, amdgpu_pasid_freed,
+ TP_PROTO(unsigned pasid),
+ TP_ARGS(pasid)
+);
+
+TRACE_EVENT(amdgpu_isolation,
+ TP_PROTO(void *prev, void *next),
+ TP_ARGS(prev, next),
+ TP_STRUCT__entry(
+ __field(void *, prev)
+ __field(void *, next)
+ ),
+
+ TP_fast_assign(
+ __entry->prev = prev;
+ __entry->next = next;
+ ),
+ TP_printk("prev=%p, next=%p",
+ __entry->prev,
+ __entry->next)
+);
+
+TRACE_EVENT(amdgpu_cleaner_shader,
+ TP_PROTO(struct amdgpu_ring *ring, struct dma_fence *fence),
+ TP_ARGS(ring, fence),
+ TP_STRUCT__entry(
+ __string(ring, ring->name)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __assign_str(ring);
+ __entry->seqno = fence->seqno;
+ ),
+ TP_printk("ring=%s, seqno=%Lu", __get_str(ring), __entry->seqno)
);
TRACE_EVENT(amdgpu_bo_list_set,
@@ -391,8 +521,8 @@ TRACE_EVENT(amdgpu_cs_bo_status,
__entry->total_bo, __entry->total_size)
);
-TRACE_EVENT(amdgpu_ttm_bo_move,
- TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
+TRACE_EVENT(amdgpu_bo_move,
+ TP_PROTO(struct amdgpu_bo *bo, uint32_t new_placement, uint32_t old_placement),
TP_ARGS(bo, new_placement, old_placement),
TP_STRUCT__entry(
__field(struct amdgpu_bo *, bo)
@@ -412,10 +542,46 @@ TRACE_EVENT(amdgpu_ttm_bo_move,
__entry->new_placement, __entry->bo_size)
);
+TRACE_EVENT(amdgpu_ib_pipe_sync,
+ TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
+ TP_ARGS(sched_job, fence),
+ TP_STRUCT__entry(
+ __string(ring, sched_job->base.sched->name)
+ __field(struct dma_fence *, fence)
+ __field(u64, ctx)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __assign_str(ring);
+ __entry->fence = fence;
+ __entry->ctx = fence->context;
+ __entry->seqno = fence->seqno;
+ ),
+ TP_printk("job ring=%s need pipe sync to fence=%llu:%llu",
+ __get_str(ring), __entry->ctx, __entry->seqno)
+);
+
+TRACE_EVENT(amdgpu_reset_reg_dumps,
+ TP_PROTO(uint32_t address, uint32_t value),
+ TP_ARGS(address, value),
+ TP_STRUCT__entry(
+ __field(uint32_t, address)
+ __field(uint32_t, value)
+ ),
+ TP_fast_assign(
+ __entry->address = address;
+ __entry->value = value;
+ ),
+ TP_printk("amdgpu register dump 0x%x: 0x%x",
+ __entry->address,
+ __entry->value)
+);
+
#undef AMDGPU_JOB_GET_TIMELINE_NAME
#endif
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/amd/amdgpu
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
index 385b7e1d72f9..b96d885f6e33 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
@@ -1,8 +1,29 @@
+// SPDX-License-Identifier: MIT
/* Copyright Red Hat Inc 2010.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
* Author : Dave Airlie <airlied@redhat.com>
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
+#include "amdgpu_cs.h"
#include "amdgpu.h"
#define CREATE_TRACE_POINTS
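/*
 * Editor's aside (not part of the patch): CREATE_TRACE_POINTS must be
 * defined in exactly one translation unit before the trace header is
 * included; <trace/define_trace.h> then re-includes that header using the
 * TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE macros, which is why the include
 * path above now names the amdgpu directory instead of ".". The whole
 * pattern, as used by this file, boils down to:
 */
#define CREATE_TRACE_POINTS
#include "amdgpu_trace.h"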
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5db0230e45c6..aa9ee5dffa45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -29,331 +29,400 @@
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
* Dave Airlie
*/
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_module.h>
-#include <ttm/ttm_page_alloc.h>
-#include <drm/drmP.h>
-#include <drm/amdgpu_drm.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/pagemap.h>
+#include <linux/sched/task.h>
+#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
-#include <linux/swiotlb.h>
#include <linux/swap.h>
-#include <linux/pagemap.h>
-#include <linux/debugfs.h>
-#include "amdgpu.h"
-#include "bif/bif_4_1_d.h"
-
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
-static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
-static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-
-
-/*
- * Global memory.
- */
-static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
-{
- return ttm_mem_global_init(ref->object);
-}
-
-static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
-{
- ttm_mem_global_release(ref->object);
-}
+#include <linux/dma-buf.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
-static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
-{
- struct drm_global_reference *global_ref;
- struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
- int r;
+#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_tt.h>
- adev->mman.mem_global_referenced = false;
- global_ref = &adev->mman.mem_global_ref;
- global_ref->global_type = DRM_GLOBAL_TTM_MEM;
- global_ref->size = sizeof(struct ttm_mem_global);
- global_ref->init = &amdgpu_ttm_mem_global_init;
- global_ref->release = &amdgpu_ttm_mem_global_release;
- r = drm_global_item_ref(global_ref);
- if (r) {
- DRM_ERROR("Failed setting up TTM memory accounting "
- "subsystem.\n");
- goto error_mem;
- }
-
- adev->mman.bo_global_ref.mem_glob =
- adev->mman.mem_global_ref.object;
- global_ref = &adev->mman.bo_global_ref.ref;
- global_ref->global_type = DRM_GLOBAL_TTM_BO;
- global_ref->size = sizeof(struct ttm_bo_global);
- global_ref->init = &ttm_bo_global_init;
- global_ref->release = &ttm_bo_global_release;
- r = drm_global_item_ref(global_ref);
- if (r) {
- DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- goto error_bo;
- }
+#include <drm/amdgpu_drm.h>
- ring = adev->mman.buffer_funcs_ring;
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
- r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
- rq, amdgpu_sched_jobs);
- if (r) {
- DRM_ERROR("Failed setting up TTM BO move run queue.\n");
- goto error_entity;
- }
+#include "amdgpu.h"
+#include "amdgpu_object.h"
+#include "amdgpu_trace.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_sdma.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_hmm.h"
+#include "amdgpu_atomfirmware.h"
+#include "amdgpu_res_cursor.h"
+#include "bif/bif_4_1_d.h"
- adev->mman.mem_global_referenced = true;
+MODULE_IMPORT_NS("DMA_BUF");
- return 0;
+#define AMDGPU_TTM_VRAM_MAX_DW_READ ((size_t)128)
-error_entity:
- drm_global_item_unref(&adev->mman.bo_global_ref.ref);
-error_bo:
- drm_global_item_unref(&adev->mman.mem_global_ref);
-error_mem:
- return r;
-}
+static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_resource *bo_mem);
+static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
+ struct ttm_tt *ttm);
-static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
+static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
+ unsigned int type,
+ uint64_t size_in_page)
{
- if (adev->mman.mem_global_referenced) {
- amd_sched_entity_fini(adev->mman.entity.sched,
- &adev->mman.entity);
- drm_global_item_unref(&adev->mman.bo_global_ref.ref);
- drm_global_item_unref(&adev->mman.mem_global_ref);
- adev->mman.mem_global_referenced = false;
- }
-}
-
-static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
-static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
- struct ttm_mem_type_manager *man)
-{
- struct amdgpu_device *adev;
-
- adev = amdgpu_ttm_adev(bdev);
-
- switch (type) {
- case TTM_PL_SYSTEM:
- /* System memory */
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- case TTM_PL_TT:
- man->func = &amdgpu_gtt_mgr_func;
- man->gpu_offset = adev->mc.gtt_start;
- man->available_caching = TTM_PL_MASK_CACHING;
- man->default_caching = TTM_PL_FLAG_CACHED;
- man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
- break;
- case TTM_PL_VRAM:
- /* "On-card" video ram */
- man->func = &amdgpu_vram_mgr_func;
- man->gpu_offset = adev->mc.vram_start;
- man->flags = TTM_MEMTYPE_FLAG_FIXED |
- TTM_MEMTYPE_FLAG_MAPPABLE;
- man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
- man->default_caching = TTM_PL_FLAG_WC;
- break;
- case AMDGPU_PL_GDS:
- case AMDGPU_PL_GWS:
- case AMDGPU_PL_OA:
- /* On-chip GDS memory*/
- man->func = &ttm_bo_manager_func;
- man->gpu_offset = 0;
- man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
- man->available_caching = TTM_PL_FLAG_UNCACHED;
- man->default_caching = TTM_PL_FLAG_UNCACHED;
- break;
- default:
- DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
- return -EINVAL;
- }
- return 0;
+ return ttm_range_man_init(&adev->mman.bdev, type,
+ false, size_in_page);
}
+/**
+ * amdgpu_evict_flags - Compute placement flags
+ *
+ * @bo: The buffer object to evict
+ * @placement: Possible destination(s) for evicted BO
+ *
+ * Fill in placement data when ttm_bo_evict() is called
+ */
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
- static struct ttm_place placements = {
+ static const struct ttm_place placements = {
.fpfn = 0,
.lpfn = 0,
- .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
+ .mem_type = TTM_PL_SYSTEM,
+ .flags = 0
};
- unsigned i;
- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
+ /* Don't handle scatter gather BOs */
+ if (bo->type == ttm_bo_type_sg) {
+ placement->num_placement = 0;
+ return;
+ }
+
+ /* Object isn't an AMDGPU object so ignore */
+ if (!amdgpu_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
- placement->busy_placement = &placements;
placement->num_placement = 1;
- placement->num_busy_placement = 1;
return;
}
- abo = container_of(bo, struct amdgpu_bo, tbo);
- switch (bo->mem.mem_type) {
+
+ abo = ttm_to_amdgpu_bo(bo);
+ if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) {
+ placement->num_placement = 0;
+ return;
+ }
+
+ switch (bo->resource->mem_type) {
+ case AMDGPU_PL_GDS:
+ case AMDGPU_PL_GWS:
+ case AMDGPU_PL_OA:
+ case AMDGPU_PL_DOORBELL:
+ case AMDGPU_PL_MMIO_REMAP:
+ placement->num_placement = 0;
+ return;
+
case TTM_PL_VRAM:
- if (adev->mman.buffer_funcs &&
- adev->mman.buffer_funcs_ring &&
- adev->mman.buffer_funcs_ring->ready == false) {
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ if (!adev->mman.buffer_funcs_enabled) {
+ /* Move to system memory */
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+
+ } else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
+ amdgpu_res_cpu_visible(adev, bo->resource)) {
+
+ /* Try evicting to the CPU inaccessible part of VRAM
+ * first, but only set GTT as busy placement, so this
+ * BO will be evicted to GTT rather than causing other
+ * BOs to be evicted from VRAM
+ */
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT |
+ AMDGPU_GEM_DOMAIN_CPU);
+ abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
+ abo->placements[0].lpfn = 0;
+ abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
} else {
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
- for (i = 0; i < abo->placement.num_placement; ++i) {
- if (!(abo->placements[i].flags &
- TTM_PL_FLAG_TT))
- continue;
-
- if (abo->placements[i].lpfn)
- continue;
-
- /* set an upper limit to force directly
- * allocating address space for the BO.
- */
- abo->placements[i].lpfn =
- adev->mc.gtt_size >> PAGE_SHIFT;
- }
+ /* Move to GTT memory */
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
+ AMDGPU_GEM_DOMAIN_CPU);
}
break;
case TTM_PL_TT:
+ case AMDGPU_PL_PREEMPT:
default:
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ break;
}
*placement = abo->placement;
}
-static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
+/**
+ * amdgpu_ttm_map_buffer - Map memory into the GART windows
+ * @bo: buffer object to map
+ * @mem: memory object to map
+ * @mm_cur: range to map
+ * @window: which GART window to use
+ * @ring: DMA ring to use for the copy
+ * @tmz: if we should setup a TMZ enabled mapping
+ * @size: in number of bytes to map, out number of bytes mapped
+ * @addr: resulting address inside the MC address space
+ *
+ * Setup one of the GART windows to access a specific piece of memory or return
+ * the physical address for local memory.
+ */
+static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
+ struct ttm_resource *mem,
+ struct amdgpu_res_cursor *mm_cur,
+ unsigned int window, struct amdgpu_ring *ring,
+ bool tmz, uint64_t *size, uint64_t *addr)
{
- struct amdgpu_bo *abo = container_of(bo, struct amdgpu_bo, tbo);
+ struct amdgpu_device *adev = ring->adev;
+ unsigned int offset, num_pages, num_dw, num_bytes;
+ uint64_t src_addr, dst_addr;
+ struct amdgpu_job *job;
+ void *cpu_addr;
+ uint64_t flags;
+ unsigned int i;
+ int r;
- if (amdgpu_ttm_tt_get_usermm(bo->ttm))
- return -EPERM;
- return drm_vma_node_verify_access(&abo->gem_base.vma_node,
- filp->private_data);
-}
+ BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
+ AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
-static void amdgpu_move_null(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *new_mem)
-{
- struct ttm_mem_reg *old_mem = &bo->mem;
+ if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT))
+ return -EINVAL;
- BUG_ON(old_mem->mm_node != NULL);
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
-}
+ /* Map only what can't be accessed directly */
+ if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
+ *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
+ mm_cur->start;
+ return 0;
+ }
-static int amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
- struct drm_mm_node *mm_node,
- struct ttm_mem_reg *mem,
- uint64_t *addr)
-{
- int r;
- switch (mem->mem_type) {
- case TTM_PL_TT:
- r = amdgpu_ttm_bind(bo, mem);
- if (r)
- return r;
+ /*
+ * If start begins at an offset inside the page, then adjust the size
+ * and addr accordingly
+ */
+ offset = mm_cur->start & ~PAGE_MASK;
- case TTM_PL_VRAM:
- *addr = mm_node->start << PAGE_SHIFT;
- *addr += bo->bdev->man[mem->mem_type].gpu_offset;
- break;
- default:
- DRM_ERROR("Unknown placement %d\n", mem->mem_type);
- return -EINVAL;
+ num_pages = PFN_UP(*size + offset);
+ num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE);
+
+ *size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset);
+
+ *addr = adev->gmc.gart_start;
+ *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
+ AMDGPU_GPU_PAGE_SIZE;
+ *addr += offset;
+
+ num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+ num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4 + num_bytes,
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER);
+ if (r)
+ return r;
+
+ src_addr = num_dw * 4;
+ src_addr += job->ibs[0].gpu_addr;
+
+ dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+ dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
+ amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
+ dst_addr, num_bytes, 0);
+
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
+ if (tmz)
+ flags |= AMDGPU_PTE_TMZ;
+
+ cpu_addr = &job->ibs[0].ptr[num_dw];
+
+ if (mem->mem_type == TTM_PL_TT) {
+ dma_addr_t *dma_addr;
+
+ dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
+ amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr);
+ } else {
+ dma_addr_t dma_address;
+
+ dma_address = mm_cur->start;
+ dma_address += adev->vm_manager.vram_base_offset;
+
+ for (i = 0; i < num_pages; ++i) {
+ amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address,
+ flags, cpu_addr);
+ dma_address += PAGE_SIZE;
+ }
}
+ dma_fence_put(amdgpu_job_submit(job));
return 0;
}
-static int amdgpu_move_blit(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+/**
+ * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
+ * @adev: amdgpu device
+ * @src: buffer/address where to read from
+ * @dst: buffer/address where to write to
+ * @size: number of bytes to copy
+ * @tmz: if a secure copy should be used
+ * @resv: resv object to sync to
+ * @f: Returns the last fence if multiple jobs are submitted.
+ *
+ * The function copies @size bytes from {src->mem + src->offset} to
+ * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
+ * move and different for a BO to BO copy.
+ *
+ */
+int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ const struct amdgpu_copy_mem *src,
+ const struct amdgpu_copy_mem *dst,
+ uint64_t size, bool tmz,
+ struct dma_resv *resv,
+ struct dma_fence **f)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-
- struct drm_mm_node *old_mm, *new_mm;
- uint64_t old_start, old_size, new_start, new_size;
- unsigned long num_pages;
+ struct amdgpu_res_cursor src_mm, dst_mm;
struct dma_fence *fence = NULL;
- int r;
-
- BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);
+ int r = 0;
+ uint32_t copy_flags = 0;
+ struct amdgpu_bo *abo_src, *abo_dst;
- if (!ring->ready) {
- DRM_ERROR("Trying to move memory with ring turned off.\n");
+ if (!adev->mman.buffer_funcs_enabled) {
+ dev_err(adev->dev,
+ "Trying to move memory with ring turned off.\n");
return -EINVAL;
}
- old_mm = old_mem->mm_node;
- r = amdgpu_mm_node_addr(bo, old_mm, old_mem, &old_start);
- if (r)
- return r;
- old_size = old_mm->size;
+ amdgpu_res_first(src->mem, src->offset, size, &src_mm);
+ amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
+ mutex_lock(&adev->mman.gtt_window_lock);
+ while (src_mm.remaining) {
+ uint64_t from, to, cur_size, tiling_flags;
+ uint32_t num_type, data_format, max_com, write_compress_disable;
+ struct dma_fence *next;
- new_mm = new_mem->mm_node;
- r = amdgpu_mm_node_addr(bo, new_mm, new_mem, &new_start);
- if (r)
- return r;
- new_size = new_mm->size;
+ /* Never copy more than 256MiB at once to avoid a timeout */
+ cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20);
- num_pages = new_mem->num_pages;
- while (num_pages) {
- unsigned long cur_pages = min(old_size, new_size);
- struct dma_fence *next;
+ /* Map src to window 0 and dst to window 1. */
+ r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
+ 0, ring, tmz, &cur_size, &from);
+ if (r)
+ goto error;
- r = amdgpu_copy_buffer(ring, old_start, new_start,
- cur_pages * PAGE_SIZE,
- bo->resv, &next, false);
+ r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
+ 1, ring, tmz, &cur_size, &to);
+ if (r)
+ goto error;
+
+ abo_src = ttm_to_amdgpu_bo(src->bo);
+ abo_dst = ttm_to_amdgpu_bo(dst->bo);
+ if (tmz)
+ copy_flags |= AMDGPU_COPY_FLAGS_TMZ;
+ if ((abo_src->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
+ (abo_src->tbo.resource->mem_type == TTM_PL_VRAM))
+ copy_flags |= AMDGPU_COPY_FLAGS_READ_DECOMPRESSED;
+ if ((abo_dst->flags & AMDGPU_GEM_CREATE_GFX12_DCC) &&
+ (dst->mem->mem_type == TTM_PL_VRAM)) {
+ copy_flags |= AMDGPU_COPY_FLAGS_WRITE_COMPRESSED;
+ amdgpu_bo_get_tiling_flags(abo_dst, &tiling_flags);
+ max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
+ num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
+ data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
+ write_compress_disable =
+ AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
+ copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
+ AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
+ AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
+ AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
+ write_compress_disable));
+ }
+
+ r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
+ &next, false, true, copy_flags);
if (r)
goto error;
dma_fence_put(fence);
fence = next;
- num_pages -= cur_pages;
- if (!num_pages)
- break;
-
- old_size -= cur_pages;
- if (!old_size) {
- r = amdgpu_mm_node_addr(bo, ++old_mm, old_mem,
- &old_start);
- if (r)
- goto error;
- old_size = old_mm->size;
- } else {
- old_start += cur_pages * PAGE_SIZE;
- }
+ amdgpu_res_next(&src_mm, cur_size);
+ amdgpu_res_next(&dst_mm, cur_size);
+ }
+error:
+ mutex_unlock(&adev->mman.gtt_window_lock);
+ if (f)
+ *f = dma_fence_get(fence);
+ dma_fence_put(fence);
+ return r;
+}
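/*
 * Editor's aside (illustrative sketch, not part of the patch): a caller
 * might use amdgpu_ttm_copy_mem_to_mem() along these lines to copy the
 * contents of one BO into another. The helper name, the zero offsets and
 * the blocking wait are assumptions for the example only.
 */
static int example_copy_bo_to_bo(struct amdgpu_device *adev,
				 struct amdgpu_bo *bo_src,
				 struct amdgpu_bo *bo_dst,
				 uint64_t num_bytes)
{
	struct amdgpu_copy_mem src = {
		.bo = &bo_src->tbo,
		.mem = bo_src->tbo.resource,
		.offset = 0,
	};
	struct amdgpu_copy_mem dst = {
		.bo = &bo_dst->tbo,
		.mem = bo_dst->tbo.resource,
		.offset = 0,
	};
	struct dma_fence *fence = NULL;
	int r;

	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, num_bytes,
				       false /* tmz */,
				       bo_dst->tbo.base.resv, &fence);
	if (!r && fence) {
		/* a real caller may prefer an interruptible wait */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}
	return r;
}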
+
+/*
+ * amdgpu_move_blit - Copy an entire buffer to another buffer
+ *
+ * This is a helper called by amdgpu_bo_move() to help move buffers to and
+ * from VRAM.
+ */
+static int amdgpu_move_blit(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_resource *new_mem,
+ struct ttm_resource *old_mem)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+ struct amdgpu_copy_mem src, dst;
+ struct dma_fence *fence = NULL;
+ int r;
- new_size -= cur_pages;
- if (!new_size) {
- r = amdgpu_mm_node_addr(bo, ++new_mm, new_mem,
- &new_start);
- if (r)
- goto error;
+ src.bo = bo;
+ dst.bo = bo;
+ src.mem = old_mem;
+ dst.mem = new_mem;
+ src.offset = 0;
+ dst.offset = 0;
+
+ r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
+ new_mem->size,
+ amdgpu_bo_encrypted(abo),
+ bo->base.resv, &fence);
+ if (r)
+ goto error;
- new_size = new_mm->size;
- } else {
- new_start += cur_pages * PAGE_SIZE;
+ /* clear the space being freed */
+ if (old_mem->mem_type == TTM_PL_VRAM &&
+ (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
+ struct dma_fence *wipe_fence = NULL;
+
+ r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence,
+ false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT);
+ if (r) {
+ goto error;
+ } else if (wipe_fence) {
+ amdgpu_vram_mgr_set_cleared(bo->resource);
+ dma_fence_put(fence);
+ fence = wipe_fence;
}
}
- r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
+ /* Always block for VM page tables before committing the new location */
+ if (bo->type == ttm_bo_type_kernel)
+ r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem);
+ else
+ r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
dma_fence_put(fence);
return r;
@@ -364,648 +433,917 @@ error:
return r;
}
-static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+/**
+ * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU
+ * @adev: amdgpu device
+ * @res: the resource to check
+ *
+ * Returns: true if the full resource is CPU visible, false otherwise.
+ */
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+ struct ttm_resource *res)
{
- struct amdgpu_device *adev;
- struct ttm_mem_reg *old_mem = &bo->mem;
- struct ttm_mem_reg tmp_mem;
- struct ttm_place placements;
- struct ttm_placement placement;
- int r;
+ struct amdgpu_res_cursor cursor;
- adev = amdgpu_ttm_adev(bo->bdev);
- tmp_mem = *new_mem;
- tmp_mem.mm_node = NULL;
- placement.num_placement = 1;
- placement.placement = &placements;
- placement.num_busy_placement = 1;
- placement.busy_placement = &placements;
- placements.fpfn = 0;
- placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait_gpu);
- if (unlikely(r)) {
- return r;
- }
+ if (!res)
+ return false;
- r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
- if (unlikely(r)) {
- goto out_cleanup;
- }
+ if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
+ res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL ||
+ res->mem_type == AMDGPU_PL_MMIO_REMAP)
+ return true;
- r = ttm_tt_bind(bo->ttm, &tmp_mem);
- if (unlikely(r)) {
- goto out_cleanup;
- }
- r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
- if (unlikely(r)) {
- goto out_cleanup;
+ if (res->mem_type != TTM_PL_VRAM)
+ return false;
+
+ amdgpu_res_first(res, 0, res->size, &cursor);
+ while (cursor.remaining) {
+ if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size)
+ return false;
+ amdgpu_res_next(&cursor, cursor.size);
}
- r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
-out_cleanup:
- ttm_bo_mem_put(bo, &tmp_mem);
- return r;
+
+ return true;
}
-static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+/*
+ * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy
+ *
+ * Called by amdgpu_bo_move()
+ */
+static bool amdgpu_res_copyable(struct amdgpu_device *adev,
+ struct ttm_resource *mem)
{
- struct amdgpu_device *adev;
- struct ttm_mem_reg *old_mem = &bo->mem;
- struct ttm_mem_reg tmp_mem;
- struct ttm_placement placement;
- struct ttm_place placements;
- int r;
+ if (!amdgpu_res_cpu_visible(adev, mem))
+ return false;
- adev = amdgpu_ttm_adev(bo->bdev);
- tmp_mem = *new_mem;
- tmp_mem.mm_node = NULL;
- placement.num_placement = 1;
- placement.placement = &placements;
- placement.num_busy_placement = 1;
- placement.busy_placement = &placements;
- placements.fpfn = 0;
- placements.lpfn = adev->mc.gtt_size >> PAGE_SHIFT;
- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait_gpu);
- if (unlikely(r)) {
- return r;
- }
- r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
- if (unlikely(r)) {
- goto out_cleanup;
- }
- r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
- if (unlikely(r)) {
- goto out_cleanup;
- }
-out_cleanup:
- ttm_bo_mem_put(bo, &tmp_mem);
- return r;
+ /* ttm_resource_ioremap only supports contiguous memory */
+ if (mem->mem_type == TTM_PL_VRAM &&
+ !(mem->placement & TTM_PL_FLAG_CONTIGUOUS))
+ return false;
+
+ return true;
}
-static int amdgpu_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+/*
+ * amdgpu_bo_move - Move a buffer object to a new memory location
+ *
+ * Called by ttm_bo_handle_move_mem()
+ */
+static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem,
+ struct ttm_place *hop)
{
struct amdgpu_device *adev;
struct amdgpu_bo *abo;
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = bo->resource;
int r;
- /* Can't move a pinned BO */
- abo = container_of(bo, struct amdgpu_bo, tbo);
- if (WARN_ON_ONCE(abo->pin_count > 0))
- return -EINVAL;
+ if (new_mem->mem_type == TTM_PL_TT ||
+ new_mem->mem_type == AMDGPU_PL_PREEMPT) {
+ r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
+ if (r)
+ return r;
+ }
+ abo = ttm_to_amdgpu_bo(bo);
adev = amdgpu_ttm_adev(bo->bdev);
- if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
- amdgpu_move_null(bo, new_mem);
+ if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
+ bo->ttm == NULL)) {
+ amdgpu_bo_move_notify(bo, evict, new_mem);
+ ttm_bo_move_null(bo, new_mem);
return 0;
}
- if ((old_mem->mem_type == TTM_PL_TT &&
- new_mem->mem_type == TTM_PL_SYSTEM) ||
- (old_mem->mem_type == TTM_PL_SYSTEM &&
- new_mem->mem_type == TTM_PL_TT)) {
- /* bind is enough */
- amdgpu_move_null(bo, new_mem);
+ if (old_mem->mem_type == TTM_PL_SYSTEM &&
+ (new_mem->mem_type == TTM_PL_TT ||
+ new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
+ amdgpu_bo_move_notify(bo, evict, new_mem);
+ ttm_bo_move_null(bo, new_mem);
return 0;
}
- if (adev->mman.buffer_funcs == NULL ||
- adev->mman.buffer_funcs_ring == NULL ||
- !adev->mman.buffer_funcs_ring->ready) {
- /* use memcpy */
- goto memcpy;
+ if ((old_mem->mem_type == TTM_PL_TT ||
+ old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
+ new_mem->mem_type == TTM_PL_SYSTEM) {
+ r = ttm_bo_wait_ctx(bo, ctx);
+ if (r)
+ return r;
+
+ amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+ amdgpu_bo_move_notify(bo, evict, new_mem);
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, new_mem);
+ return 0;
}
- if (old_mem->mem_type == TTM_PL_VRAM &&
- new_mem->mem_type == TTM_PL_SYSTEM) {
- r = amdgpu_move_vram_ram(bo, evict, interruptible,
- no_wait_gpu, new_mem);
- } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
- new_mem->mem_type == TTM_PL_VRAM) {
- r = amdgpu_move_ram_vram(bo, evict, interruptible,
- no_wait_gpu, new_mem);
- } else {
- r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
+ if (old_mem->mem_type == AMDGPU_PL_GDS ||
+ old_mem->mem_type == AMDGPU_PL_GWS ||
+ old_mem->mem_type == AMDGPU_PL_OA ||
+ old_mem->mem_type == AMDGPU_PL_DOORBELL ||
+ old_mem->mem_type == AMDGPU_PL_MMIO_REMAP ||
+ new_mem->mem_type == AMDGPU_PL_GDS ||
+ new_mem->mem_type == AMDGPU_PL_GWS ||
+ new_mem->mem_type == AMDGPU_PL_OA ||
+ new_mem->mem_type == AMDGPU_PL_DOORBELL ||
+ new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) {
+ /* Nothing to save here */
+ amdgpu_bo_move_notify(bo, evict, new_mem);
+ ttm_bo_move_null(bo, new_mem);
+ return 0;
}
+ if (bo->type == ttm_bo_type_device &&
+ new_mem->mem_type == TTM_PL_VRAM &&
+ old_mem->mem_type != TTM_PL_VRAM) {
+ /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
+ * accesses the BO after it's moved.
+ */
+ abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ }
+
+ if (adev->mman.buffer_funcs_enabled &&
+ ((old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_VRAM) ||
+ (old_mem->mem_type == TTM_PL_VRAM &&
+ new_mem->mem_type == TTM_PL_SYSTEM))) {
+ hop->fpfn = 0;
+ hop->lpfn = 0;
+ hop->mem_type = TTM_PL_TT;
+ hop->flags = TTM_PL_FLAG_TEMPORARY;
+ return -EMULTIHOP;
+ }
+
+ amdgpu_bo_move_notify(bo, evict, new_mem);
+ if (adev->mman.buffer_funcs_enabled)
+ r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
+ else
+ r = -ENODEV;
+
if (r) {
-memcpy:
- r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
- if (r) {
+ /* Check that all memory is CPU accessible */
+ if (!amdgpu_res_copyable(adev, old_mem) ||
+ !amdgpu_res_copyable(adev, new_mem)) {
+ pr_err("Move buffer fallback to memcpy unavailable\n");
return r;
}
+
+ r = ttm_bo_move_memcpy(bo, ctx, new_mem);
+ if (r)
+ return r;
}
- /* update statistics */
- atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
+ /* update statistics after the move */
+ if (evict)
+ atomic64_inc(&adev->num_evictions);
+ atomic64_add(bo->base.size, &adev->num_bytes_moved);
return 0;
}
-static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+/*
+ * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault
+ *
+ * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
+ */
+static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
+ struct ttm_resource *mem)
{
- struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- mem->bus.addr = NULL;
- mem->bus.offset = 0;
- mem->bus.size = mem->num_pages << PAGE_SHIFT;
- mem->bus.base = 0;
- mem->bus.is_iomem = false;
- if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
- return -EINVAL;
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
/* system memory */
return 0;
case TTM_PL_TT:
+ case AMDGPU_PL_PREEMPT:
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
- /* check if it's visible */
- if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
- return -EINVAL;
- mem->bus.base = adev->mc.aper_base;
+
+ if (adev->mman.aper_base_kaddr &&
+ mem->placement & TTM_PL_FLAG_CONTIGUOUS)
+ mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
+ mem->bus.offset;
+
+ mem->bus.offset += adev->gmc.aper_base;
mem->bus.is_iomem = true;
break;
+ case AMDGPU_PL_DOORBELL:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.offset += adev->doorbell.base;
+ mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_uncached;
+ break;
+ case AMDGPU_PL_MMIO_REMAP:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.offset += adev->rmmio_remap.bus_addr;
+ mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_uncached;
+ break;
default:
return -EINVAL;
}
return 0;
}
-static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
+static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct amdgpu_res_cursor cursor;
+
+ amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
+ &cursor);
+
+ if (bo->resource->mem_type == AMDGPU_PL_DOORBELL)
+ return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT;
+ else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP)
+ return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT;
+
+ return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
-static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
- unsigned long page_offset)
+/**
+ * amdgpu_ttm_domain_start - Returns GPU start address
+ * @adev: amdgpu device object
+ * @type: type of the memory
+ *
+ * Returns:
+ * GPU start address of a memory domain
+ */
+
+uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
{
- struct drm_mm_node *mm = bo->mem.mm_node;
- uint64_t size = mm->size;
- uint64_t offset = page_offset;
+ switch (type) {
+ case TTM_PL_TT:
+ return adev->gmc.gart_start;
+ case TTM_PL_VRAM:
+ return adev->gmc.vram_start;
+ }
- page_offset = do_div(offset, size);
- mm += offset;
- return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
+ return 0;
}
/*
* TTM backend functions.
*/
-struct amdgpu_ttm_gup_task_list {
- struct list_head list;
- struct task_struct *task;
-};
-
struct amdgpu_ttm_tt {
- struct ttm_dma_tt ttm;
- struct amdgpu_device *adev;
+ struct ttm_tt ttm;
+ struct drm_gem_object *gobj;
u64 offset;
uint64_t userptr;
- struct mm_struct *usermm;
+ struct task_struct *usertask;
uint32_t userflags;
- spinlock_t guptasklock;
- struct list_head guptasks;
- atomic_t mmu_invalidations;
- struct list_head list;
+ bool bound;
+ int32_t pool_id;
};
-int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
-{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned int flags = 0;
- unsigned pinned = 0;
- int r;
+#define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
- if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
- flags |= FOLL_WRITE;
+#ifdef CONFIG_DRM_AMDGPU_USERPTR
+/*
+ * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
+ * memory and start HMM tracking CPU page table update
+ *
+ * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
+ * once afterwards to stop HMM tracking
+ */
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
+ struct hmm_range **range)
+{
+ struct ttm_tt *ttm = bo->tbo.ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
+ unsigned long start = gtt->userptr;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ bool readonly;
+ int r = 0;
+
+ /* Make sure get_user_pages_done() can cleanup gracefully */
+ *range = NULL;
+
+ mm = bo->notifier.mm;
+ if (unlikely(!mm)) {
+ DRM_DEBUG_DRIVER("BO is not registered?\n");
+ return -EFAULT;
+ }
+
+ if (!mmget_not_zero(mm)) /* Happens during process shutdown */
+ return -ESRCH;
+
+ mmap_read_lock(mm);
+ vma = vma_lookup(mm, start);
+ if (unlikely(!vma)) {
+ r = -EFAULT;
+ goto out_unlock;
+ }
+ if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
+ vma->vm_file)) {
+ r = -EPERM;
+ goto out_unlock;
+ }
+
+ readonly = amdgpu_ttm_tt_is_readonly(ttm);
+ r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
+ readonly, NULL, range);
+out_unlock:
+ mmap_read_unlock(mm);
+ if (r)
+ pr_debug("failed %d to get user pages 0x%lx\n", r, start);
- if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
- /* check that we only use anonymous memory
- to prevent problems with writeback */
- unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
- struct vm_area_struct *vma;
+ mmput(mm);
- vma = find_vma(gtt->usermm, gtt->userptr);
- if (!vma || vma->vm_file || vma->vm_end < end)
- return -EPERM;
- }
+ return r;
+}
- do {
- unsigned num_pages = ttm->num_pages - pinned;
- uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
- struct page **p = pages + pinned;
- struct amdgpu_ttm_gup_task_list guptask;
+/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations
+ */
+void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+ struct hmm_range *range)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
- guptask.task = current;
- spin_lock(&gtt->guptasklock);
- list_add(&guptask.list, &gtt->guptasks);
- spin_unlock(&gtt->guptasklock);
+ if (gtt && gtt->userptr && range)
+ amdgpu_hmm_range_get_pages_done(range);
+}
- r = get_user_pages(userptr, num_pages, flags, p, NULL);
+/*
+ * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change
+ * Check if the pages backing this ttm range have been invalidated
+ *
+ * Returns: true if pages are still valid
+ */
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+ struct hmm_range *range)
+{
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
- spin_lock(&gtt->guptasklock);
- list_del(&guptask.list);
- spin_unlock(&gtt->guptasklock);
+ if (!gtt || !gtt->userptr || !range)
+ return false;
- if (r < 0)
- goto release_pages;
+ DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
+ gtt->userptr, ttm->num_pages);
- pinned += r;
+ WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
- } while (pinned < ttm->num_pages);
+ return !amdgpu_hmm_range_get_pages_done(range);
+}
+#endif
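/*
 * Editor's aside (illustrative sketch, not part of the patch): the
 * comment on amdgpu_ttm_tt_get_user_pages() above asks for exactly one
 * matching "done" call; a caller (built with CONFIG_DRM_AMDGPU_USERPTR)
 * would pair the two roughly as below. The helper name and the -EAGAIN
 * retry policy are assumptions.
 */
static int example_userptr_snapshot(struct amdgpu_bo *bo)
{
	struct hmm_range *range;
	int r;

	r = amdgpu_ttm_tt_get_user_pages(bo, &range);
	if (r)
		return r;

	/* ... populate/bind the ttm_tt with the pages in @range ... */

	if (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range))
		r = -EAGAIN;	/* CPU page tables changed, caller retries */

	return r;
}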
- return 0;
+/*
+ * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
+ *
+ * Called by amdgpu_cs_list_validate(). This creates the page list
+ * that backs user memory and will ultimately be mapped into the device
+ * address space.
+ */
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range)
+{
+ unsigned long i;
-release_pages:
- release_pages(pages, pinned, 0);
- return r;
+ for (i = 0; i < ttm->num_pages; ++i)
+ ttm->pages[i] = range ? hmm_pfn_to_page(range->hmm_pfns[i]) : NULL;
}
-/* prepare the sg table with the user pages */
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+/*
+ * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
+ *
+ * Called by amdgpu_ttm_backend_bind()
+ */
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
+ struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned nents;
- int r;
-
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ int r;
+ /* Allocate an SG array and squash pages into it */
r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
- ttm->num_pages << PAGE_SHIFT,
+ (u64)ttm->num_pages << PAGE_SHIFT,
GFP_KERNEL);
if (r)
goto release_sg;
- r = -ENOMEM;
- nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
- if (nents != ttm->sg->nents)
- goto release_sg;
+ /* Map SG to device */
+ r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
+ if (r)
+ goto release_sg_table;
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address, ttm->num_pages);
+ /* convert SG to linear array of pages and dma addresses */
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
return 0;
+release_sg_table:
+ sg_free_table(ttm->sg);
release_sg:
kfree(ttm->sg);
+ ttm->sg = NULL;
return r;
}
-static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
+/*
+ * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
+ */
+static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
+ struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
- struct sg_page_iter sg_iter;
-
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
/* double check that we don't free the table twice */
- if (!ttm->sg->sgl)
+ if (!ttm->sg || !ttm->sg->sgl)
return;
- /* free the sg table and pages again */
- dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
-
- for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
- if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
- set_page_dirty(page);
+ /* unmap the pages mapped to the device */
+ dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
+ sg_free_table(ttm->sg);
+}
- mark_page_accessed(page);
- put_page(page);
+/*
+ * total_pages is laid out as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ...
+ * MQDn+CtrlStackn, where n is the number of XCCs per partition.
+ * pages_per_xcc is the size of one MQD+CtrlStack chunk. The first page of
+ * each chunk is the MQD and keeps the default memory type (UC); the
+ * remaining pages_per_xcc - 1 pages are the control stack and have their
+ * memory type changed to NC.
+ */
+static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev,
+ struct ttm_tt *ttm, uint64_t flags)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ uint64_t total_pages = ttm->num_pages;
+ int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp);
+ uint64_t page_idx, pages_per_xcc;
+ int i;
+ uint64_t ctrl_flags = AMDGPU_PTE_MTYPE_VG10(flags, AMDGPU_MTYPE_NC);
+
+ pages_per_xcc = total_pages;
+ do_div(pages_per_xcc, num_xcc);
+
+ for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) {
+ /* MQD page: use default flags */
+ amdgpu_gart_bind(adev,
+ gtt->offset + (page_idx << PAGE_SHIFT),
+ 1, &gtt->ttm.dma_address[page_idx], flags);
+ /*
+ * Ctrl pages - modify the memory type to NC (ctrl_flags) from
+ * the second page of the BO onward.
+ */
+ amdgpu_gart_bind(adev,
+ gtt->offset + ((page_idx + 1) << PAGE_SHIFT),
+ pages_per_xcc - 1,
+ &gtt->ttm.dma_address[page_idx + 1],
+ ctrl_flags);
}
+}
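/*
 * Editor's aside (worked example with made-up numbers, not part of the
 * patch): for a BO of 8 pages on a partition with num_xcc = 2,
 * pages_per_xcc = 4. The loop above binds page 0 and page 4 (the two
 * MQDs) with the default flags, and pages 1-3 and 5-7 (the control
 * stacks) with the NC memory type in ctrl_flags.
 */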
- sg_free_table(ttm->sg);
+static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
+ struct ttm_buffer_object *tbo,
+ uint64_t flags)
+{
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
+ struct ttm_tt *ttm = tbo->ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
+
+ if (amdgpu_bo_encrypted(abo))
+ flags |= AMDGPU_PTE_TMZ;
+
+ if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
+ amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags);
+ } else {
+ amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+ gtt->ttm.dma_address, flags);
+ }
+ gtt->bound = true;
}
-static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
- struct ttm_mem_reg *bo_mem)
+/*
+ * amdgpu_ttm_backend_bind - Bind GTT memory
+ *
+ * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
+ * This handles binding GTT memory to the device address space.
+ */
+static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_resource *bo_mem)
{
- struct amdgpu_ttm_tt *gtt = (void*)ttm;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
+ uint64_t flags;
int r;
+ if (!bo_mem)
+ return -EINVAL;
+
+ if (gtt->bound)
+ return 0;
+
if (gtt->userptr) {
- r = amdgpu_ttm_tt_pin_userptr(ttm);
+ r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
if (r) {
- DRM_ERROR("failed to pin userptr\n");
+ dev_err(adev->dev, "failed to pin userptr\n");
return r;
}
+ } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
+ if (!ttm->sg) {
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+
+ attach = gtt->gobj->import_attach;
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ttm->sg = sgt;
+ }
+
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
}
+
if (!ttm->num_pages) {
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
- if (bo_mem->mem_type == AMDGPU_PL_GDS ||
- bo_mem->mem_type == AMDGPU_PL_GWS ||
- bo_mem->mem_type == AMDGPU_PL_OA)
- return -EINVAL;
-
- return 0;
-}
+ if (bo_mem->mem_type != TTM_PL_TT ||
+ !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
+ gtt->offset = AMDGPU_BO_INVALID_OFFSET;
+ return 0;
+ }
-bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
-{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ /* compute PTE flags relevant to this BO memory */
+ flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
- return gtt && !list_empty(&gtt->list);
+ /* bind pages into GART page tables */
+ gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
+ amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
+ gtt->ttm.dma_address, flags);
+ gtt->bound = true;
+ return 0;
}
-int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
+/*
+ * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either
+ * through AGP or GART aperture.
+ *
+ * If bo is accessible through AGP aperture, then use AGP aperture
+ * to access bo; otherwise allocate logical space in GART aperture
+ * and map bo to GART aperture.
+ */
+int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
- struct ttm_tt *ttm = bo->ttm;
- struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
- uint64_t flags;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
+ struct ttm_placement placement;
+ struct ttm_place placements;
+ struct ttm_resource *tmp;
+ uint64_t addr, flags;
int r;
- if (!ttm || amdgpu_ttm_is_bound(ttm))
+ if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
return 0;
- r = amdgpu_gtt_mgr_alloc(&bo->bdev->man[TTM_PL_TT], bo,
- NULL, bo_mem);
- if (r) {
- DRM_ERROR("Failed to allocate GTT address space (%d)\n", r);
- return r;
- }
+ addr = amdgpu_gmc_agp_addr(bo);
+ if (addr != AMDGPU_BO_INVALID_OFFSET)
+ return 0;
- flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
- gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
- r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
- ttm->pages, gtt->ttm.dma_address, flags);
+ /* allocate GART space */
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placements.fpfn = 0;
+ placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
+ placements.mem_type = TTM_PL_TT;
+ placements.flags = bo->resource->placement;
- if (r) {
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
- ttm->num_pages, gtt->offset);
+ r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+ if (unlikely(r))
return r;
- }
- spin_lock(&gtt->adev->gtt_list_lock);
- list_add_tail(&gtt->list, &gtt->adev->gtt_list);
- spin_unlock(&gtt->adev->gtt_list_lock);
+
+ /* compute PTE flags for this buffer object */
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
+
+ /* Bind pages */
+ gtt->offset = (u64)tmp->start << PAGE_SHIFT;
+ amdgpu_ttm_gart_bind(adev, bo, flags);
+ amdgpu_gart_invalidate_tlb(adev);
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, tmp);
+
return 0;
}
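/*
 * Editor's aside (illustrative sketch, not part of the patch): a typical
 * caller reserves and validates the BO first, then makes sure it has a
 * GART (or AGP) address before handing it to the hardware. The helper
 * name below is an assumption for the example.
 */
static int example_map_for_gpu(struct ttm_buffer_object *bo, uint64_t *gpu_addr)
{
	int r;

	r = amdgpu_ttm_alloc_gart(bo);
	if (r)
		return r;

	*gpu_addr = amdgpu_bo_gpu_offset(ttm_to_amdgpu_bo(bo));
	return 0;
}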
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
+/*
+ * amdgpu_ttm_recover_gart - Rebind GTT pages
+ *
+ * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to
+ * rebind GTT pages during a GPU reset.
+ */
+void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
- struct amdgpu_ttm_tt *gtt, *tmp;
- struct ttm_mem_reg bo_mem;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
uint64_t flags;
- int r;
- bo_mem.mem_type = TTM_PL_TT;
- spin_lock(&adev->gtt_list_lock);
- list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
- flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
- r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
- gtt->ttm.ttm.pages, gtt->ttm.dma_address,
- flags);
- if (r) {
- spin_unlock(&adev->gtt_list_lock);
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
- gtt->ttm.ttm.num_pages, gtt->offset);
- return r;
- }
- }
- spin_unlock(&adev->gtt_list_lock);
- return 0;
+ if (!tbo->ttm)
+ return;
+
+ flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
+ amdgpu_ttm_gart_bind(adev, tbo, flags);
}
-static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
+/*
+ * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages
+ *
+ * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
+ * ttm_tt_destroy().
+ */
+static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
+ struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
- if (gtt->userptr)
- amdgpu_ttm_tt_unpin_userptr(ttm);
+ /* if the pages have userptr pinning then clear that first */
+ if (gtt->userptr) {
+ amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
+ } else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) {
+ struct dma_buf_attachment *attach;
- if (!amdgpu_ttm_is_bound(ttm))
- return 0;
+ attach = gtt->gobj->import_attach;
+ dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
+ ttm->sg = NULL;
+ }
- /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
- if (gtt->adev->gart.ready)
- amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
+ if (!gtt->bound)
+ return;
- spin_lock(&gtt->adev->gtt_list_lock);
- list_del_init(&gtt->list);
- spin_unlock(&gtt->adev->gtt_list_lock);
+ if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
+ return;
- return 0;
+ /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
+ amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
+ gtt->bound = false;
}
-static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
+static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
+ struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
- ttm_dma_tt_fini(&gtt->ttm);
+ if (gtt->usertask)
+ put_task_struct(gtt->usertask);
+
+ ttm_tt_fini(&gtt->ttm);
kfree(gtt);
}
-static struct ttm_backend_func amdgpu_backend_func = {
- .bind = &amdgpu_ttm_backend_bind,
- .unbind = &amdgpu_ttm_backend_unbind,
- .destroy = &amdgpu_ttm_backend_destroy,
-};
-
-static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+/**
+ * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO
+ *
+ * @bo: The buffer object to create a GTT ttm_tt object around
+ * @page_flags: Page flags to be added to the ttm_tt object
+ *
+ * Called by ttm_tt_create().
+ */
+static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
+ uint32_t page_flags)
{
- struct amdgpu_device *adev;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_ttm_tt *gtt;
-
- adev = amdgpu_ttm_adev(bdev);
+ enum ttm_caching caching;
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
- if (gtt == NULL) {
+ if (!gtt)
return NULL;
- }
- gtt->ttm.ttm.func = &amdgpu_backend_func;
- gtt->adev = adev;
- if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+
+ gtt->gobj = &bo->base;
+ if (adev->gmc.mem_partitions && abo->xcp_id >= 0)
+ gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id);
+ else
+ gtt->pool_id = abo->xcp_id;
+
+ if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+ caching = ttm_write_combined;
+ else
+ caching = ttm_cached;
+
+ /* allocate space for the uninitialized page entries */
+ if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
kfree(gtt);
return NULL;
}
- INIT_LIST_HEAD(&gtt->list);
- return &gtt->ttm.ttm;
+ return &gtt->ttm;
}
-static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
+/*
+ * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
+ *
+ * Map the pages of a ttm_tt object to an address space visible
+ * to the underlying device.
+ */
+static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
- struct amdgpu_device *adev;
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned i;
- int r;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
-
- if (ttm->state != tt_unpopulated)
- return 0;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
+ struct ttm_pool *pool;
+ pgoff_t i;
+ int ret;
- if (gtt && gtt->userptr) {
+ /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
+ if (gtt->userptr) {
ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!ttm->sg)
return -ENOMEM;
-
- ttm->page_flags |= TTM_PAGE_FLAG_SG;
- ttm->state = tt_unbound;
return 0;
}
- if (slave && ttm->sg) {
- drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
- gtt->ttm.dma_address, ttm->num_pages);
- ttm->state = tt_unbound;
+ if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
return 0;
- }
-
- adev = amdgpu_ttm_adev(ttm->bdev);
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- return ttm_dma_populate(&gtt->ttm, adev->dev);
- }
-#endif
+ if (adev->mman.ttm_pools && gtt->pool_id >= 0)
+ pool = &adev->mman.ttm_pools[gtt->pool_id];
+ else
+ pool = &adev->mman.bdev.pool;
+ ret = ttm_pool_alloc(pool, ttm, ctx);
+ if (ret)
+ return ret;
- r = ttm_pool_populate(ttm);
- if (r) {
- return r;
- }
+ for (i = 0; i < ttm->num_pages; ++i)
+ ttm->pages[i]->mapping = bdev->dev_mapping;
- for (i = 0; i < ttm->num_pages; i++) {
- gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
- while (i--) {
- pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- gtt->ttm.dma_address[i] = 0;
- }
- ttm_pool_unpopulate(ttm);
- return -EFAULT;
- }
- }
return 0;
}
-static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
+/*
+ * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
+ *
+ * Unmaps pages of a ttm_tt object from the device address space and
+ * unpopulates the page array backing it.
+ */
+static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
+ struct ttm_tt *ttm)
{
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
struct amdgpu_device *adev;
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned i;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ struct ttm_pool *pool;
+ pgoff_t i;
- if (gtt && gtt->userptr) {
+ amdgpu_ttm_backend_unbind(bdev, ttm);
+
+ if (gtt->userptr) {
+ amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);
- ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
+ ttm->sg = NULL;
return;
}
- if (slave)
+ if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
return;
- adev = amdgpu_ttm_adev(ttm->bdev);
+ for (i = 0; i < ttm->num_pages; ++i)
+ ttm->pages[i]->mapping = NULL;
-#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
- ttm_dma_unpopulate(&gtt->ttm, adev->dev);
- return;
- }
-#endif
+ adev = amdgpu_ttm_adev(bdev);
- for (i = 0; i < ttm->num_pages; i++) {
- if (gtt->ttm.dma_address[i]) {
- pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- }
+ if (adev->mman.ttm_pools && gtt->pool_id >= 0)
+ pool = &adev->mman.ttm_pools[gtt->pool_id];
+ else
+ pool = &adev->mman.bdev.pool;
- ttm_pool_unpopulate(ttm);
+ return ttm_pool_free(pool, ttm);
}
-int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
- uint32_t flags)
+/**
+ * amdgpu_ttm_tt_get_userptr - Return the user address backing a userptr
+ * GTT ttm_tt
+ *
+ * @tbo: The ttm_buffer_object that contains the userptr
+ * @user_addr: The returned value
+ */
+int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
+ uint64_t *user_addr)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt;
- if (gtt == NULL)
+ if (!tbo->ttm)
return -EINVAL;
+ gtt = (void *)tbo->ttm;
+ *user_addr = gtt->userptr;
+ return 0;
+}
+
+/**
+ * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
+ * task
+ *
+ * @bo: The ttm_buffer_object to bind this userptr to
+ * @addr: The address in the current tasks VM space to use
+ * @flags: Requirements of userptr object.
+ *
+ * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to
+ * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to
+ * initialize GPU VM for a KFD process.
+ */
+int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
+ uint64_t addr, uint32_t flags)
+{
+ struct amdgpu_ttm_tt *gtt;
+
+ if (!bo->ttm) {
+ /* TODO: We want a separate TTM object type for userptrs */
+ bo->ttm = amdgpu_ttm_tt_create(bo, 0);
+ if (bo->ttm == NULL)
+ return -ENOMEM;
+ }
+
+ /* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
+ bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
+
+ gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
gtt->userptr = addr;
- gtt->usermm = current->mm;
gtt->userflags = flags;
- spin_lock_init(&gtt->guptasklock);
- INIT_LIST_HEAD(&gtt->guptasks);
- atomic_set(&gtt->mmu_invalidations, 0);
+
+ if (gtt->usertask)
+ put_task_struct(gtt->usertask);
+ gtt->usertask = current->group_leader;
+ get_task_struct(gtt->usertask);
return 0;
}
+/*
+ * amdgpu_ttm_tt_get_usermm - Return the mm_struct for a userptr ttm_tt object
+ */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL)
return NULL;
- return gtt->usermm;
+ if (gtt->usertask == NULL)
+ return NULL;
+
+ return gtt->usertask->mm;
}
+/*
+ * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
+ * address range for the current task.
+ *
+ */
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
- unsigned long end)
+ unsigned long end, unsigned long *userptr)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
- struct amdgpu_ttm_gup_task_list *entry;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
unsigned long size;
if (gtt == NULL || !gtt->userptr)
return false;
- size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+ /* Return false if no part of the ttm_tt object lies within
+ * the range
+ */
+ size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
if (gtt->userptr > end || gtt->userptr + size <= start)
return false;
- spin_lock(&gtt->guptasklock);
- list_for_each_entry(entry, &gtt->guptasks, list) {
- if (entry->task == current) {
- spin_unlock(&gtt->guptasklock);
- return false;
- }
- }
- spin_unlock(&gtt->guptasklock);
-
- atomic_inc(&gtt->mmu_invalidations);
-
+ if (userptr)
+ *userptr = gtt->userptr;
return true;
}
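+/*
+ * Worked example for the overlap test above (hypothetical numbers): with
+ * gtt->userptr = 0x10000 and num_pages = 4 (size = 0x4000 for 4 KiB pages),
+ * the object spans [0x10000, 0x14000).  An invalidation range of
+ * start = 0x13000, end = 0x20000 overlaps, so the function returns true and
+ * stores 0x10000 through *userptr; start = 0x14000 would not overlap.
+ */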
-bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
- int *last_invalidated)
+/*
+ * amdgpu_ttm_tt_is_userptr - Are the pages backed by a userptr?
+ */
+bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
- int prev_invalidated = *last_invalidated;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
+
+ if (gtt == NULL || !gtt->userptr)
+ return false;
- *last_invalidated = atomic_read(&gtt->mmu_invalidations);
- return prev_invalidated != *last_invalidated;
+ return true;
}
+/*
+ * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only?
+ */
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
if (gtt == NULL)
return false;
@@ -1013,21 +1351,52 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}
-uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
- struct ttm_mem_reg *mem)
+/**
+ * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object
+ *
+ * @ttm: The ttm_tt object to compute the flags for
+ * @mem: The memory resource backing this ttm_tt object
+ *
+ * Figure out the flags to use for a VM PDE (Page Directory Entry).
+ */
+uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
{
uint64_t flags = 0;
if (mem && mem->mem_type != TTM_PL_SYSTEM)
flags |= AMDGPU_PTE_VALID;
- if (mem && mem->mem_type == TTM_PL_TT) {
+ if (mem && (mem->mem_type == TTM_PL_TT ||
+ mem->mem_type == AMDGPU_PL_DOORBELL ||
+ mem->mem_type == AMDGPU_PL_PREEMPT ||
+ mem->mem_type == AMDGPU_PL_MMIO_REMAP)) {
flags |= AMDGPU_PTE_SYSTEM;
- if (ttm->caching_state == tt_cached)
+ if (ttm->caching == ttm_cached)
flags |= AMDGPU_PTE_SNOOPED;
}
+ if (mem && mem->mem_type == TTM_PL_VRAM &&
+ mem->bus.caching == ttm_cached)
+ flags |= AMDGPU_PTE_SNOOPED;
+
+ return flags;
+}
+
+/**
+ * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
+ *
+ * @adev: amdgpu_device pointer
+ * @ttm: The ttm_tt object to compute the flags for
+ * @mem: The memory resource backing this ttm_tt object
+ *
+ * Figure out the flags to use for a VM PTE (Page Table Entry).
+ */
+uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+ struct ttm_resource *mem)
+{
+ uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);
+
flags |= adev->gart.gart_pte_flags;
flags |= AMDGPU_PTE_READABLE;
@@ -1037,261 +1406,913 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
return flags;
}
+/*
+ * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
+ * object.
+ *
+ * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
+ * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
+ * it can find space for a new object and by ttm_bo_force_list_clean() which is
+ * used to clean out a memory space.
+ */
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
- unsigned long num_pages = bo->mem.num_pages;
- struct drm_mm_node *node = bo->mem.mm_node;
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *f;
- if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
return ttm_bo_eviction_valuable(bo, place);
- switch (bo->mem.mem_type) {
- case TTM_PL_TT:
+ /* Swapout? */
+ if (bo->resource->mem_type == TTM_PL_SYSTEM)
return true;
- case TTM_PL_VRAM:
- /* Check each drm MM node individually */
- while (num_pages) {
- if (place->fpfn < (node->start + node->size) &&
- !(place->lpfn && place->lpfn <= node->start))
- return true;
-
- num_pages -= node->size;
- ++node;
- }
- break;
+ if (bo->type == ttm_bo_type_kernel &&
+ !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
+ return false;
- default:
- break;
+ /* If bo is a KFD BO, check if the bo belongs to the current process.
+ * If true, then return false as any KFD process needs all its BOs to
+ * be resident to run successfully
+ */
+ dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP, f) {
+ if (amdkfd_fence_check_mm(f, current->mm) &&
+ !(place->flags & TTM_PL_FLAG_CONTIGUOUS))
+ return false;
}
+ /* Preemptible BOs don't own system resources managed by the
+ * driver (pages, VRAM, GART space). They point to resources
+ * owned by someone else (e.g. pageable memory in user mode
+ * or a DMABuf). They are used in a preemptible context so we
+ * can guarantee no deadlocks and good QoS in case of MMU
+ * notifiers or DMABuf move notifiers from the resource owner.
+ */
+ if (bo->resource->mem_type == AMDGPU_PL_PREEMPT)
+ return false;
+
+ if (bo->resource->mem_type == TTM_PL_TT &&
+ amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
+ return false;
+
return ttm_bo_eviction_valuable(bo, place);
}
-static struct ttm_bo_driver amdgpu_bo_driver = {
+static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
+ void *buf, size_t size, bool write)
+{
+ while (size) {
+ uint64_t aligned_pos = ALIGN_DOWN(pos, 4);
+ uint64_t bytes = 4 - (pos & 0x3);
+ uint32_t shift = (pos & 0x3) * 8;
+ uint32_t mask = 0xffffffff << shift;
+ uint32_t value = 0;
+
+ if (size < bytes) {
+ mask &= 0xffffffff >> (bytes - size) * 8;
+ bytes = size;
+ }
+
+ if (mask != 0xffffffff) {
+ amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false);
+ if (write) {
+ value &= ~mask;
+ value |= (*(uint32_t *)buf << shift) & mask;
+ amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true);
+ } else {
+ value = (value & mask) >> shift;
+ memcpy(buf, &value, bytes);
+ }
+ } else {
+ amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write);
+ }
+
+ pos += bytes;
+ buf += bytes;
+ size -= bytes;
+ }
+}
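+/*
+ * Worked example for the read-modify-write above (hypothetical values): a
+ * 2-byte read at pos = 6 gives aligned_pos = 4, bytes = 2, shift = 16 and
+ * mask = 0xffff0000.  The full dword at offset 4 is fetched via
+ * amdgpu_device_mm_access(), masked and shifted down by 16, and only the two
+ * requested bytes are copied to the caller's buffer.
+ */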
+
+static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
+ unsigned long offset, void *buf,
+ int len, int write)
+{
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ struct amdgpu_res_cursor src_mm;
+ struct amdgpu_job *job;
+ struct dma_fence *fence;
+ uint64_t src_addr, dst_addr;
+ unsigned int num_dw;
+ int r, idx;
+
+ if (len != PAGE_SIZE)
+ return -EINVAL;
+
+ if (!adev->mman.sdma_access_ptr)
+ return -EACCES;
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return -ENODEV;
+
+ if (write)
+ memcpy(adev->mman.sdma_access_ptr, buf, len);
+
+ num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+ r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+ &job,
+ AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA);
+ if (r)
+ goto out;
+
+ amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
+ src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
+ src_mm.start;
+ dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
+ if (write)
+ swap(src_addr, dst_addr);
+
+ amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
+ PAGE_SIZE, 0);
+
+ amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+
+ fence = amdgpu_job_submit(job);
+
+ if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
+ r = -ETIMEDOUT;
+ dma_fence_put(fence);
+
+ if (!(r || write))
+ memcpy(buf, adev->mman.sdma_access_ptr, len);
+out:
+ drm_dev_exit(idx);
+ return r;
+}
+
+/**
+ * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
+ *
+ * @bo: The buffer object to read/write
+ * @offset: Offset into buffer object
+ * @buf: Secondary buffer to write/read from
+ * @len: Length in bytes of access
+ * @write: true if writing
+ *
+ * This is used to access VRAM that backs a buffer object via MMIO
+ * access for debugging purposes.
+ */
+static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
+ unsigned long offset, void *buf, int len,
+ int write)
+{
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ struct amdgpu_res_cursor cursor;
+ int ret = 0;
+
+ if (bo->resource->mem_type != TTM_PL_VRAM)
+ return -EIO;
+
+ if (amdgpu_device_has_timeouts_enabled(adev) &&
+ !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write))
+ return len;
+
+ amdgpu_res_first(bo->resource, offset, len, &cursor);
+ while (cursor.remaining) {
+ size_t count, size = cursor.size;
+ loff_t pos = cursor.start;
+
+ count = amdgpu_device_aper_access(adev, pos, buf, size, write);
+ size -= count;
+ if (size) {
+ /* use MM access for the rest of VRAM and to handle unaligned addresses */
+ pos += count;
+ buf += count;
+ amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write);
+ }
+
+ ret += cursor.size;
+ buf += cursor.size;
+ amdgpu_res_next(&cursor, cursor.size);
+ }
+
+ return ret;
+}
+
+static void
+amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+ amdgpu_bo_move_notify(bo, false, NULL);
+}
+
+static struct ttm_device_funcs amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
- .invalidate_caches = &amdgpu_invalidate_caches,
- .init_mem_type = &amdgpu_init_mem_type,
+ .ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
- .verify_access = &amdgpu_verify_access,
- .move_notify = &amdgpu_bo_move_notify,
- .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
+ .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
+ .release_notify = &amdgpu_bo_release_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
- .io_mem_free = &amdgpu_ttm_io_mem_free,
.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
+ .access_memory = &amdgpu_ttm_access_memory,
};
+/*
+ * Firmware Reservation functions
+ */
+/**
+ * amdgpu_ttm_fw_reserve_vram_fini - free fw reserved vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * free fw reserved vram if it has been reserved.
+ */
+static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->mman.fw_vram_usage_reserved_bo,
+ NULL, &adev->mman.fw_vram_usage_va);
+}
+
+/*
+ * Driver Reservation functions
+ */
+/**
+ * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * free drv reserved vram if it has been reserved.
+ */
+static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo,
+ NULL,
+ &adev->mman.drv_vram_usage_va);
+}
+
+/**
+ * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * create bo vram reservation from fw.
+ */
+static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
+{
+ uint64_t vram_size = adev->gmc.visible_vram_size;
+
+ adev->mman.fw_vram_usage_va = NULL;
+ adev->mman.fw_vram_usage_reserved_bo = NULL;
+
+ if (adev->mman.fw_vram_usage_size == 0 ||
+ adev->mman.fw_vram_usage_size > vram_size)
+ return 0;
+
+ return amdgpu_bo_create_kernel_at(adev,
+ adev->mman.fw_vram_usage_start_offset,
+ adev->mman.fw_vram_usage_size,
+ &adev->mman.fw_vram_usage_reserved_bo,
+ &adev->mman.fw_vram_usage_va);
+}
+
+/**
+ * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * create bo vram reservation from drv.
+ */
+static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev)
+{
+ u64 vram_size = adev->gmc.visible_vram_size;
+
+ adev->mman.drv_vram_usage_va = NULL;
+ adev->mman.drv_vram_usage_reserved_bo = NULL;
+
+ if (adev->mman.drv_vram_usage_size == 0 ||
+ adev->mman.drv_vram_usage_size > vram_size)
+ return 0;
+
+ return amdgpu_bo_create_kernel_at(adev,
+ adev->mman.drv_vram_usage_start_offset,
+ adev->mman.drv_vram_usage_size,
+ &adev->mman.drv_vram_usage_reserved_bo,
+ &adev->mman.drv_vram_usage_va);
+}
+
+/*
+ * Memory training reservation functions
+ */
+
+/**
+ * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * free memory training reserved vram if it has been reserved.
+ */
+static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
+{
+ struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
+
+ ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT;
+ amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
+ ctx->c2p_bo = NULL;
+
+ return 0;
+}
+
+static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev,
+ uint32_t reserve_size)
+{
+ struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->c2p_train_data_offset =
+ ALIGN((adev->gmc.mc_vram_size - reserve_size - SZ_1M), SZ_1M);
+ ctx->p2c_train_data_offset =
+ (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
+ ctx->train_data_size =
+ GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
+
+ DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
+ ctx->train_data_size,
+ ctx->p2c_train_data_offset,
+ ctx->c2p_train_data_offset);
+}
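+/*
+ * Worked example for the offsets above (hypothetical sizes): with
+ * mc_vram_size = 16 GiB and reserve_size = 4 MiB, the C2P training buffer is
+ * placed at ALIGN(16 GiB - 4 MiB - 1 MiB, 1 MiB), i.e. 5 MiB below the top of
+ * VRAM, while the P2C offset sits GDDR6_MEM_TRAINING_OFFSET bytes below the
+ * top of VRAM.
+ */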
+
+/*
+ * reserve TMR memory at the top of VRAM which holds
+ * IP Discovery data and is protected by PSP.
+ */
+static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
+{
+ struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx;
+ bool mem_train_support = false;
+ uint32_t reserve_size = 0;
+ int ret;
+
+ if (adev->bios && !amdgpu_sriov_vf(adev)) {
+ if (amdgpu_atomfirmware_mem_training_supported(adev))
+ mem_train_support = true;
+ else
+ DRM_DEBUG("memory training does not support!\n");
+ }
+
+ /*
+ * Query the reserved TMR size through atom firmwareinfo for Sienna_Cichlid and onwards
+ * for all use cases (IP discovery/G6 memory training/profiling/diagnostic data, etc.)
+ *
+ * Otherwise, fall back to the legacy approach of checking and reserving a TMR block for
+ * IP discovery data and G6 memory training data respectively.
+ */
+ if (adev->bios)
+ reserve_size =
+ amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
+
+ if (!adev->bios &&
+ (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4) ||
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)))
+ reserve_size = max(reserve_size, (uint32_t)280 << 20);
+ else if (!reserve_size)
+ reserve_size = DISCOVERY_TMR_OFFSET;
+
+ if (mem_train_support) {
+ /* reserve vram for mem train according to TMR location */
+ amdgpu_ttm_training_data_block_init(adev, reserve_size);
+ ret = amdgpu_bo_create_kernel_at(adev,
+ ctx->c2p_train_data_offset,
+ ctx->train_data_size,
+ &ctx->c2p_bo,
+ NULL);
+ if (ret) {
+ dev_err(adev->dev, "alloc c2p_bo failed(%d)!\n", ret);
+ amdgpu_ttm_training_reserve_vram_fini(adev);
+ return ret;
+ }
+ ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
+ }
+
+ if (!adev->gmc.is_app_apu) {
+ ret = amdgpu_bo_create_kernel_at(
+ adev, adev->gmc.real_vram_size - reserve_size,
+ reserve_size, &adev->mman.fw_reserved_memory, NULL);
+ if (ret) {
+ dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory,
+ NULL, NULL);
+ return ret;
+ }
+ } else {
+ DRM_DEBUG_DRIVER("backdoor fw loading path for PSP TMR, no reservation needed\n");
+ }
+
+ return 0;
+}
+
+static int amdgpu_ttm_pools_init(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (!adev->gmc.is_app_apu || !adev->gmc.num_mem_partitions)
+ return 0;
+
+ adev->mman.ttm_pools = kcalloc(adev->gmc.num_mem_partitions,
+ sizeof(*adev->mman.ttm_pools),
+ GFP_KERNEL);
+ if (!adev->mman.ttm_pools)
+ return -ENOMEM;
+
+ for (i = 0; i < adev->gmc.num_mem_partitions; i++) {
+ ttm_pool_init(&adev->mman.ttm_pools[i], adev->dev,
+ adev->gmc.mem_partitions[i].numa.node,
+ false, false);
+ }
+ return 0;
+}
+
+static void amdgpu_ttm_pools_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (!adev->gmc.is_app_apu || !adev->mman.ttm_pools)
+ return;
+
+ for (i = 0; i < adev->gmc.num_mem_partitions; i++)
+ ttm_pool_fini(&adev->mman.ttm_pools[i]);
+
+ kfree(adev->mman.ttm_pools);
+ adev->mman.ttm_pools = NULL;
+}
+
+/**
+ * amdgpu_ttm_mmio_remap_bo_init - Allocate the singleton 4K MMIO_REMAP BO
+ * @adev: amdgpu device
+ *
+ * Allocates a one-page (4K) GEM BO in AMDGPU_GEM_DOMAIN_MMIO_REMAP when the
+ * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host
+ * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular
+ * GEM object (amdgpu_bo_create).
+ *
+ * Return:
+ * * 0 on success or intentional skip (feature not present/unsupported)
+ * * negative errno on allocation failure
+ */
+static int amdgpu_ttm_mmio_remap_bo_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_bo_param bp;
+ int r;
+
+ /* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */
+ if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE)
+ return 0;
+
+ memset(&bp, 0, sizeof(bp));
+
+ /* Create exactly one GEM BO in the MMIO_REMAP domain. */
+ bp.type = ttm_bo_type_device; /* userspace-mappable GEM */
+ bp.size = AMDGPU_GPU_PAGE_SIZE; /* 4K */
+ bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_MMIO_REMAP;
+ bp.flags = 0;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/**
+ * amdgpu_ttm_mmio_remap_bo_fini - Free the singleton MMIO_REMAP BO
+ * @adev: amdgpu device
+ *
+ * Frees the kernel-owned MMIO_REMAP BO if it was allocated by
+ * amdgpu_ttm_mmio_remap_bo_init().
+ */
+static void amdgpu_ttm_mmio_remap_bo_fini(struct amdgpu_device *adev)
+{
+ amdgpu_bo_unref(&adev->rmmio_remap.bo);
+ adev->rmmio_remap.bo = NULL;
+}
+
+/*
+ * amdgpu_ttm_init - Init the memory management (ttm) as well as various
+ * gtt/vram related fields.
+ *
+ * This initializes all of the memory space pools that the TTM layer
+ * will need such as the GTT space (system memory mapped to the device),
+ * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
+ * can be mapped per VMID.
+ */
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
+ uint64_t gtt_size;
int r;
- r = amdgpu_ttm_global_init(adev);
+ mutex_init(&adev->mman.gtt_window_lock);
+
+ dma_set_max_seg_size(adev->dev, UINT_MAX);
+ /* No other users of the address space, so set it to 0 */
+ r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
+ adev_to_drm(adev)->anon_inode->i_mapping,
+ adev_to_drm(adev)->vma_offset_manager,
+ adev->need_swiotlb,
+ dma_addressing_limited(adev->dev));
if (r) {
+ dev_err(adev->dev,
+ "failed initializing buffer object driver(%d).\n", r);
return r;
}
- /* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&adev->mman.bdev,
- adev->mman.bo_global_ref.ref.object,
- &amdgpu_bo_driver,
- adev->ddev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
- adev->need_dma32);
+
+ r = amdgpu_ttm_pools_init(adev);
if (r) {
- DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+ dev_err(adev->dev, "failed to init ttm pools(%d).\n", r);
return r;
}
adev->mman.initialized = true;
- r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
- adev->mc.real_vram_size >> PAGE_SHIFT);
- if (r) {
- DRM_ERROR("Failed initializing VRAM heap.\n");
- return r;
+
+ if (!adev->gmc.is_app_apu) {
+ /* Initialize VRAM pool with all of VRAM divided into pages */
+ r = amdgpu_vram_mgr_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing VRAM heap.\n");
+ return r;
+ }
}
+
/* Change the size here instead of the init above so only lpfn is affected */
- amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
+ amdgpu_ttm_set_buffer_funcs_status(adev, false);
+#ifdef CONFIG_64BIT
+#ifdef CONFIG_X86
+ if (adev->gmc.xgmi.connected_to_cpu)
+ adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
+ adev->gmc.visible_vram_size);
+
+ else if (adev->gmc.is_app_apu)
+ DRM_DEBUG_DRIVER(
+ "No need to ioremap when real vram size is 0\n");
+ else
+#endif
+ adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
+ adev->gmc.visible_vram_size);
+#endif
+
+ /*
+ * The reserved VRAM for firmware must be pinned to the specified
+ * place in VRAM, so reserve it early.
+ */
+ r = amdgpu_ttm_fw_reserve_vram_init(adev);
+ if (r)
+ return r;
+
+ /*
+ * The reserved VRAM for the driver must be pinned to the specified
+ * place in VRAM, so reserve it early.
+ */
+ r = amdgpu_ttm_drv_reserve_vram_init(adev);
+ if (r)
+ return r;
+
+ /*
+ * Only NAVI10 and onward ASICs support IP discovery.
+ * If IP discovery is enabled, a block of memory should be
+ * reserved for IP discovery.
+ */
+ if (adev->mman.discovery_bin) {
+ r = amdgpu_ttm_reserve_tmr(adev);
+ if (r)
+ return r;
+ }
+
+ /* allocate memory as required for VGA
+ * This is used for VGA emulation and pre-OS scanout buffers to
+ * avoid display artifacts while transitioning between pre-OS
+ * and driver.
+ */
+ if (!adev->gmc.is_app_apu) {
+ r = amdgpu_bo_create_kernel_at(adev, 0,
+ adev->mman.stolen_vga_size,
+ &adev->mman.stolen_vga_memory,
+ NULL);
+ if (r)
+ return r;
- r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->stollen_vga_memory);
+ r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
+ adev->mman.stolen_extended_size,
+ &adev->mman.stolen_extended_memory,
+ NULL);
+
+ if (r)
+ return r;
+
+ r = amdgpu_bo_create_kernel_at(adev,
+ adev->mman.stolen_reserved_offset,
+ adev->mman.stolen_reserved_size,
+ &adev->mman.stolen_reserved_memory,
+ NULL);
+ if (r)
+ return r;
+ } else {
+ DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
+ }
+
+ dev_info(adev->dev, "amdgpu: %uM of VRAM memory ready\n",
+ (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024)));
+
+ /* Compute GTT size, either based on TTM limit
+ * or whatever the user passed on module init.
+ */
+ gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
+ if (amdgpu_gtt_size != -1) {
+ uint64_t configured_size = (uint64_t)amdgpu_gtt_size << 20;
+
+ drm_warn(&adev->ddev,
+ "Configuring gttsize via module parameter is deprecated, please use ttm.pages_limit\n");
+ if (gtt_size != configured_size)
+ drm_warn(&adev->ddev,
+ "GTT size has been set as %llu but TTM size has been set as %llu, this is unusual\n",
+ configured_size, gtt_size);
+
+ gtt_size = configured_size;
+ }
+
+ /* Initialize GTT memory pool */
+ r = amdgpu_gtt_mgr_init(adev, gtt_size);
if (r) {
+ dev_err(adev->dev, "Failed initializing GTT heap.\n");
return r;
}
- r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
- if (r)
+ dev_info(adev->dev, "amdgpu: %uM of GTT memory ready.\n",
+ (unsigned int)(gtt_size / (1024 * 1024)));
+
+ if (adev->flags & AMD_IS_APU) {
+ if (adev->gmc.real_vram_size < gtt_size)
+ adev->apu_prefer_gtt = true;
+ }
+
+ /* Initialize doorbell pool on PCI BAR */
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing doorbell heap.\n");
return r;
- r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL);
- amdgpu_bo_unreserve(adev->stollen_vga_memory);
+ }
+
+ /* Create a doorbell page for kernel usage */
+ r = amdgpu_doorbell_create_kernel_doorbells(adev);
if (r) {
- amdgpu_bo_unref(&adev->stollen_vga_memory);
+ dev_err(adev->dev, "Failed to initialize kernel doorbells.\n");
return r;
}
- DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
- (unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
- r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
- adev->mc.gtt_size >> PAGE_SHIFT);
+
+ /* Initialize MMIO-remap pool (single page 4K) */
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1);
if (r) {
- DRM_ERROR("Failed initializing GTT heap.\n");
+ dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n");
return r;
}
- DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
- (unsigned)(adev->mc.gtt_size / (1024 * 1024)));
-
- adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
- adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
- adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
- adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
- adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
- adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
- adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
- adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
- adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
- /* GDS Memory */
- if (adev->gds.mem.total_size) {
- r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
- adev->gds.mem.total_size >> PAGE_SHIFT);
- if (r) {
- DRM_ERROR("Failed initializing GDS heap.\n");
- return r;
- }
+
+ /* Allocate the singleton MMIO_REMAP BO (4K) if supported */
+ r = amdgpu_ttm_mmio_remap_bo_init(adev);
+ if (r)
+ return r;
+
+ /* Initialize preemptible memory pool */
+ r = amdgpu_preempt_mgr_init(adev);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing PREEMPT heap.\n");
+ return r;
}
- /* GWS */
- if (adev->gds.gws.total_size) {
- r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
- adev->gds.gws.total_size >> PAGE_SHIFT);
- if (r) {
- DRM_ERROR("Failed initializing gws heap.\n");
- return r;
- }
+ /* Initialize various on-chip memory pools */
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing GDS heap.\n");
+ return r;
}
- /* OA */
- if (adev->gds.oa.total_size) {
- r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
- adev->gds.oa.total_size >> PAGE_SHIFT);
- if (r) {
- DRM_ERROR("Failed initializing oa heap.\n");
- return r;
- }
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
+ if (r) {
+ dev_err(adev->dev, "Failed initializing gws heap.\n");
+ return r;
}
- r = amdgpu_ttm_debugfs_init(adev);
+ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
if (r) {
- DRM_ERROR("Failed to init debugfs\n");
+ dev_err(adev->dev, "Failed initializing oa heap.\n");
return r;
}
+ if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mman.sdma_access_bo, NULL,
+ &adev->mman.sdma_access_ptr))
+ DRM_WARN("Debug VRAM access will use slowpath MM access\n");
+
return 0;
}
+/*
+ * amdgpu_ttm_fini - De-initialize the TTM memory pools
+ */
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
- int r;
+ int idx;
if (!adev->mman.initialized)
return;
- amdgpu_ttm_debugfs_fini(adev);
- if (adev->stollen_vga_memory) {
- r = amdgpu_bo_reserve(adev->stollen_vga_memory, true);
- if (r == 0) {
- amdgpu_bo_unpin(adev->stollen_vga_memory);
- amdgpu_bo_unreserve(adev->stollen_vga_memory);
- }
- amdgpu_bo_unref(&adev->stollen_vga_memory);
- }
- ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
- ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
- if (adev->gds.mem.total_size)
- ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
- if (adev->gds.gws.total_size)
- ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
- if (adev->gds.oa.total_size)
- ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
- ttm_bo_device_release(&adev->mman.bdev);
- amdgpu_gart_fini(adev);
- amdgpu_ttm_global_fini(adev);
+
+ amdgpu_ttm_pools_fini(adev);
+
+ amdgpu_ttm_training_reserve_vram_fini(adev);
+ /* return the stolen vga memory back to VRAM */
+ if (!adev->gmc.is_app_apu) {
+ amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
+ /* return the FW reserved memory back to VRAM */
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
+ NULL);
+ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL,
+ NULL);
+ if (adev->mman.stolen_reserved_size)
+ amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
+ NULL, NULL);
+ }
+ amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
+ &adev->mman.sdma_access_ptr);
+
+ amdgpu_ttm_mmio_remap_bo_fini(adev);
+ amdgpu_ttm_fw_reserve_vram_fini(adev);
+ amdgpu_ttm_drv_reserve_vram_fini(adev);
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+
+ if (adev->mman.aper_base_kaddr)
+ iounmap(adev->mman.aper_base_kaddr);
+ adev->mman.aper_base_kaddr = NULL;
+
+ drm_dev_exit(idx);
+ }
+
+ if (!adev->gmc.is_app_apu)
+ amdgpu_vram_mgr_fini(adev);
+ amdgpu_gtt_mgr_fini(adev);
+ amdgpu_preempt_mgr_fini(adev);
+ amdgpu_doorbell_fini(adev);
+
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL);
+ ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP);
+ ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
- DRM_INFO("amdgpu: ttm finalized\n");
+ dev_info(adev->dev, "amdgpu: ttm finalized\n");
}
-/* this should only be called at bootup or when userspace
- * isn't running */
-void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
+/**
+ * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: true when we can use buffer functions.
+ *
+ * Enable/disable use of buffer functions during suspend/resume. This should
+ * only be called at bootup or when userspace isn't running.
+ */
+void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{
- struct ttm_mem_type_manager *man;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+ uint64_t size;
+ int r;
- if (!adev->mman.initialized)
+ if (!adev->mman.initialized || amdgpu_in_reset(adev) ||
+ adev->mman.buffer_funcs_enabled == enable || adev->gmc.is_app_apu)
return;
- man = &adev->mman.bdev.man[TTM_PL_VRAM];
+ if (enable) {
+ struct amdgpu_ring *ring;
+ struct drm_gpu_scheduler *sched;
+
+ ring = adev->mman.buffer_funcs_ring;
+ sched = &ring->sched;
+ r = drm_sched_entity_init(&adev->mman.high_pr,
+ DRM_SCHED_PRIORITY_KERNEL, &sched,
+ 1, NULL);
+ if (r) {
+ dev_err(adev->dev,
+ "Failed setting up TTM BO move entity (%d)\n",
+ r);
+ return;
+ }
+
+ r = drm_sched_entity_init(&adev->mman.low_pr,
+ DRM_SCHED_PRIORITY_NORMAL, &sched,
+ 1, NULL);
+ if (r) {
+ dev_err(adev->dev,
+ "Failed setting up TTM BO move entity (%d)\n",
+ r);
+ goto error_free_entity;
+ }
+ } else {
+ drm_sched_entity_destroy(&adev->mman.high_pr);
+ drm_sched_entity_destroy(&adev->mman.low_pr);
+ dma_fence_put(man->move);
+ man->move = NULL;
+ }
+
/* this just adjusts TTM size idea, which sets lpfn to the correct value */
- man->size = size >> PAGE_SHIFT;
+ if (enable)
+ size = adev->gmc.real_vram_size;
+ else
+ size = adev->gmc.visible_vram_size;
+ man->size = size;
+ adev->mman.buffer_funcs_enabled = enable;
+
+ return;
+
+error_free_entity:
+ drm_sched_entity_destroy(&adev->mman.high_pr);
}
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
+static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
+ bool direct_submit,
+ unsigned int num_dw,
+ struct dma_resv *resv,
+ bool vm_needs_flush,
+ struct amdgpu_job **job,
+ bool delayed, u64 k_job_id)
{
- struct drm_file *file_priv;
- struct amdgpu_device *adev;
-
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
+ enum amdgpu_ib_pool_type pool = direct_submit ?
+ AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED;
+ int r;
+ struct drm_sched_entity *entity = delayed ? &adev->mman.low_pr :
+ &adev->mman.high_pr;
+ r = amdgpu_job_alloc_with_ib(adev, entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ num_dw * 4, pool, job, k_job_id);
+ if (r)
+ return r;
- file_priv = filp->private_data;
- adev = file_priv->minor->dev->dev_private;
- if (adev == NULL)
- return -EINVAL;
+ if (vm_needs_flush) {
+ (*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
+ adev->gmc.pdb0_bo :
+ adev->gart.bo);
+ (*job)->vm_needs_flush = true;
+ }
+ if (!resv)
+ return 0;
- return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+ return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
+ DMA_RESV_USAGE_BOOKKEEP);
}
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
- uint64_t src_offset,
- uint64_t dst_offset,
- uint32_t byte_count,
- struct reservation_object *resv,
- struct dma_fence **fence, bool direct_submit)
+int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+ uint64_t dst_offset, uint32_t byte_count,
+ struct dma_resv *resv,
+ struct dma_fence **fence, bool direct_submit,
+ bool vm_needs_flush, uint32_t copy_flags)
{
struct amdgpu_device *adev = ring->adev;
+ unsigned int num_loops, num_dw;
struct amdgpu_job *job;
-
uint32_t max_bytes;
- unsigned num_loops, num_dw;
- unsigned i;
+ unsigned int i;
int r;
+ if (!direct_submit && !ring->sched.ready) {
+ dev_err(adev->dev,
+ "Trying to move memory with ring turned off.\n");
+ return -EINVAL;
+ }
+
max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
- num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;
-
- /* for IB padding */
- while (num_dw & 0x7)
- num_dw++;
-
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+ num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
+ r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw,
+ resv, vm_needs_flush, &job, false,
+ AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER);
if (r)
return r;
- if (resv) {
- r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED);
- if (r) {
- DRM_ERROR("sync failed (%d).\n", r);
- goto error_free;
- }
- }
-
for (i = 0; i < num_loops; i++) {
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
- dst_offset, cur_size_in_bytes);
-
+ dst_offset, cur_size_in_bytes, copy_flags);
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
byte_count -= cur_size_in_bytes;
@@ -1299,186 +2320,283 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
WARN_ON(job->ibs[0].length_dw > num_dw);
- if (direct_submit) {
- r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs,
- NULL, fence);
- job->fence = dma_fence_get(*fence);
- if (r)
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
- amdgpu_job_free(job);
- } else {
- r = amdgpu_job_submit(job, ring, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, fence);
- if (r)
- goto error_free;
- }
+ if (direct_submit)
+ r = amdgpu_job_submit_direct(job, ring, fence);
+ else
+ *fence = amdgpu_job_submit(job);
+ if (r)
+ goto error_free;
return r;
error_free:
amdgpu_job_free(job);
+ dev_err(adev->dev, "Error scheduling IBs (%d)\n", r);
return r;
}
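+/*
+ * Minimal usage sketch (hypothetical variables, assuming the buffer-funcs
+ * ring is up): copy one page between two GPU addresses that are already
+ * GART/VRAM resident, waiting on the BO's reservation object first.
+ *
+ *	struct dma_fence *fence = NULL;
+ *
+ *	r = amdgpu_copy_buffer(adev->mman.buffer_funcs_ring, src_addr,
+ *			       dst_addr, PAGE_SIZE, bo->tbo.base.resv,
+ *			       &fence, false, false, 0);
+ *	if (!r)
+ *		dma_fence_wait(fence, false);
+ *	dma_fence_put(fence);
+ */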
-int amdgpu_fill_buffer(struct amdgpu_bo *bo,
- uint32_t src_data,
- struct reservation_object *resv,
- struct dma_fence **fence)
+static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
+ uint64_t dst_addr, uint32_t byte_count,
+ struct dma_resv *resv,
+ struct dma_fence **fence,
+ bool vm_needs_flush, bool delayed,
+ u64 k_job_id)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
- struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
-
- struct drm_mm_node *mm_node;
- unsigned long num_pages;
+ struct amdgpu_device *adev = ring->adev;
unsigned int num_loops, num_dw;
-
struct amdgpu_job *job;
+ uint32_t max_bytes;
+ unsigned int i;
int r;
- if (!ring->ready) {
- DRM_ERROR("Trying to clear memory with ring turned off.\n");
- return -EINVAL;
- }
+ max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
+ num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes);
+ num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8);
+ r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush,
+ &job, delayed, k_job_id);
+ if (r)
+ return r;
- num_pages = bo->tbo.num_pages;
- mm_node = bo->tbo.mem.mm_node;
- num_loops = 0;
- while (num_pages) {
- uint32_t byte_count = mm_node->size << PAGE_SHIFT;
+ for (i = 0; i < num_loops; i++) {
+ uint32_t cur_size = min(byte_count, max_bytes);
+
+ amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
+ cur_size);
- num_loops += DIV_ROUND_UP(byte_count, max_bytes);
- num_pages -= mm_node->size;
- ++mm_node;
+ dst_addr += cur_size;
+ byte_count -= cur_size;
}
- num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
- /* for IB padding */
- num_dw += 64;
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+ *fence = amdgpu_job_submit(job);
+ return 0;
+}
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
- if (r)
- return r;
+/**
+ * amdgpu_ttm_clear_buffer - clear memory buffers
+ * @bo: amdgpu buffer object
+ * @resv: reservation object
+ * @fence: dma_fence associated with the operation
+ *
+ * Clear the memory buffer resource.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
+ struct dma_resv *resv,
+ struct dma_fence **fence)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct amdgpu_res_cursor cursor;
+ u64 addr;
+ int r = 0;
- if (resv) {
- r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED);
- if (r) {
- DRM_ERROR("sync failed (%d).\n", r);
- goto error_free;
+ if (!adev->mman.buffer_funcs_enabled)
+ return -EINVAL;
+
+ if (!fence)
+ return -EINVAL;
+
+ *fence = dma_fence_get_stub();
+
+ amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
+
+ mutex_lock(&adev->mman.gtt_window_lock);
+ while (cursor.remaining) {
+ struct dma_fence *next = NULL;
+ u64 size;
+
+ if (amdgpu_res_cleared(&cursor)) {
+ amdgpu_res_next(&cursor, cursor.size);
+ continue;
}
- }
- num_pages = bo->tbo.num_pages;
- mm_node = bo->tbo.mem.mm_node;
+ /* Never clear more than 256MiB at once to avoid timeouts */
+ size = min(cursor.size, 256ULL << 20);
- while (num_pages) {
- uint32_t byte_count = mm_node->size << PAGE_SHIFT;
- uint64_t dst_addr;
+ r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &cursor,
+ 1, ring, false, &size, &addr);
+ if (r)
+ goto err;
- r = amdgpu_mm_node_addr(&bo->tbo, mm_node,
- &bo->tbo.mem, &dst_addr);
+ r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv,
+ &next, true, true,
+ AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
if (r)
- return r;
+ goto err;
- while (byte_count) {
- uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
+ dma_fence_put(*fence);
+ *fence = next;
- amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
- dst_addr, cur_size_in_bytes);
+ amdgpu_res_next(&cursor, size);
+ }
+err:
+ mutex_unlock(&adev->mman.gtt_window_lock);
- dst_addr += cur_size_in_bytes;
- byte_count -= cur_size_in_bytes;
- }
+ return r;
+}
+
+int amdgpu_fill_buffer(struct amdgpu_bo *bo,
+ uint32_t src_data,
+ struct dma_resv *resv,
+ struct dma_fence **f,
+ bool delayed,
+ u64 k_job_id)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+ struct dma_fence *fence = NULL;
+ struct amdgpu_res_cursor dst;
+ int r;
- num_pages -= mm_node->size;
- ++mm_node;
+ if (!adev->mman.buffer_funcs_enabled) {
+ dev_err(adev->dev,
+ "Trying to clear memory with ring turned off.\n");
+ return -EINVAL;
}
- amdgpu_ring_pad_ib(ring, &job->ibs[0]);
- WARN_ON(job->ibs[0].length_dw > num_dw);
- r = amdgpu_job_submit(job, ring, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, fence);
- if (r)
- goto error_free;
+ amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst);
- return 0;
+ mutex_lock(&adev->mman.gtt_window_lock);
+ while (dst.remaining) {
+ struct dma_fence *next;
+ uint64_t cur_size, to;
-error_free:
- amdgpu_job_free(job);
+ /* Never fill more than 256MiB at once to avoid timeouts */
+ cur_size = min(dst.size, 256ULL << 20);
+
+ r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst,
+ 1, ring, false, &cur_size, &to);
+ if (r)
+ goto error;
+
+ r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv,
+ &next, true, delayed, k_job_id);
+ if (r)
+ goto error;
+
+ dma_fence_put(fence);
+ fence = next;
+
+ amdgpu_res_next(&dst, cur_size);
+ }
+error:
+ mutex_unlock(&adev->mman.gtt_window_lock);
+ if (f)
+ *f = dma_fence_get(fence);
+ dma_fence_put(fence);
return r;
}
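+/*
+ * Illustrative call (hypothetical BO, assuming buffer functions are enabled):
+ * clear a whole BO to zero and wait for completion; the job id shown is only
+ * an example value.
+ *
+ *	struct dma_fence *fence = NULL;
+ *
+ *	r = amdgpu_fill_buffer(abo, 0, NULL, &fence, false,
+ *			       AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER);
+ *	if (!r && fence) {
+ *		dma_fence_wait(fence, false);
+ *		dma_fence_put(fence);
+ *	}
+ */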
-#if defined(CONFIG_DEBUG_FS)
+/**
+ * amdgpu_ttm_evict_resources - evict memory buffers
+ * @adev: amdgpu device object
+ * @mem_type: evicted BO's memory type
+ *
+ * Evicts all @mem_type buffers on the lru list of the memory type.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
+{
+ struct ttm_resource_manager *man;
-extern void amdgpu_gtt_mgr_print(struct seq_file *m, struct ttm_mem_type_manager
- *man);
-static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- unsigned ttm_pl = *(int *)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv;
- struct ttm_bo_global *glob = adev->mman.bdev.glob;
- struct drm_printer p = drm_seq_file_printer(m);
-
- spin_lock(&glob->lru_lock);
- drm_mm_print(mm, &p);
- spin_unlock(&glob->lru_lock);
- switch (ttm_pl) {
+ switch (mem_type) {
case TTM_PL_VRAM:
- seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
- adev->mman.bdev.man[ttm_pl].size,
- (u64)atomic64_read(&adev->vram_usage) >> 20,
- (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
- break;
case TTM_PL_TT:
- amdgpu_gtt_mgr_print(m, &adev->mman.bdev.man[TTM_PL_TT]);
+ case AMDGPU_PL_GWS:
+ case AMDGPU_PL_GDS:
+ case AMDGPU_PL_OA:
+ man = ttm_manager_type(&adev->mman.bdev, mem_type);
break;
+ default:
+ dev_err(adev->dev, "Trying to evict invalid memory type\n");
+ return -EINVAL;
}
- return 0;
+
+ return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
}
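+/*
+ * Illustrative call (a suspend-style path is assumed here): evict every
+ * buffer out of VRAM so its contents can be preserved across the power
+ * transition.
+ *
+ *	r = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
+ */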
-static int ttm_pl_vram = TTM_PL_VRAM;
-static int ttm_pl_tt = TTM_PL_TT;
+#if defined(CONFIG_DEBUG_FS)
-static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
- {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
- {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
- {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
-#ifdef CONFIG_SWIOTLB
- {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
-#endif
-};
+static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = m->private;
+
+ return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
+}
+DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
+
+/*
+ * amdgpu_ttm_vram_read - Linear read access to VRAM
+ *
+ * Accesses VRAM via MMIO for debugging purposes.
+ */
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ if (*pos >= adev->gmc.mc_vram_size)
+ return -ENXIO;
+
+ size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
+ while (size) {
+ size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
+ uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
+
+ amdgpu_device_vram_access(adev, *pos, value, bytes, false);
+ if (copy_to_user(buf, value, bytes))
+ return -EFAULT;
+
+ result += bytes;
+ buf += bytes;
+ *pos += bytes;
+ size -= bytes;
+ }
+
+ return result;
+}
+
+/*
+ * amdgpu_ttm_vram_write - Linear write access to VRAM
+ *
+ * Accesses VRAM via MMIO for debugging purposes.
+ */
+static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ if (*pos >= adev->gmc.mc_vram_size)
+ return -ENXIO;
+
while (size) {
- unsigned long flags;
uint32_t value;
- if (*pos >= adev->mc.mc_vram_size)
+ if (*pos >= adev->gmc.mc_vram_size)
return result;
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
- WREG32(mmMM_INDEX_HI, *pos >> 31);
- value = RREG32(mmMM_DATA);
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-
- r = put_user(value, (uint32_t *)buf);
+ r = get_user(value, (uint32_t *)buf);
if (r)
return r;
+ amdgpu_device_mm_access(adev, *pos, &value, 4, true);
+
result += 4;
buf += 4;
*pos += 4;
@@ -1491,109 +2609,153 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
static const struct file_operations amdgpu_ttm_vram_fops = {
.owner = THIS_MODULE,
.read = amdgpu_ttm_vram_read,
- .llseek = default_llseek
+ .write = amdgpu_ttm_vram_write,
+ .llseek = default_llseek,
};
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-
-static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
+/*
+ * amdgpu_iomem_read - Virtual read access to GPU mapped memory
+ *
+ * This function is used to read memory that has been mapped to the
+ * GPU and the known addresses are not physical addresses but instead
+ * bus addresses (e.g., what you'd put in an IB or ring buffer).
+ */
+static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
+ struct iommu_domain *dom;
ssize_t result = 0;
int r;
+ /* retrieve the IOMMU domain if any for this device */
+ dom = iommu_get_domain_for_dev(adev->dev);
+
while (size) {
- loff_t p = *pos / PAGE_SIZE;
- unsigned off = *pos & ~PAGE_MASK;
- size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
- struct page *page;
+ phys_addr_t addr = *pos & PAGE_MASK;
+ loff_t off = *pos & ~PAGE_MASK;
+ size_t bytes = PAGE_SIZE - off;
+ unsigned long pfn;
+ struct page *p;
void *ptr;
- if (p >= adev->gart.num_cpu_pages)
- return result;
+ bytes = min(bytes, size);
- page = adev->gart.pages[p];
- if (page) {
- ptr = kmap(page);
- ptr += off;
+ /* Translate the bus address to a physical address. If
+ * the domain is NULL it means there is no IOMMU active
+ * and the address translation is the identity
+ */
+ addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
- r = copy_to_user(buf, ptr, cur_size);
- kunmap(adev->gart.pages[p]);
- } else
- r = clear_user(buf, cur_size);
+ pfn = addr >> PAGE_SHIFT;
+ if (!pfn_valid(pfn))
+ return -EPERM;
+ p = pfn_to_page(pfn);
+ if (p->mapping != adev->mman.bdev.dev_mapping)
+ return -EPERM;
+
+ ptr = kmap_local_page(p);
+ r = copy_to_user(buf, ptr + off, bytes);
+ kunmap_local(ptr);
if (r)
return -EFAULT;
- result += cur_size;
- buf += cur_size;
- *pos += cur_size;
- size -= cur_size;
+ size -= bytes;
+ *pos += bytes;
+ result += bytes;
}
return result;
}
-static const struct file_operations amdgpu_ttm_gtt_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_ttm_gtt_read,
- .llseek = default_llseek
-};
+/*
+ * amdgpu_iomem_write - Virtual write access to GPU mapped memory
+ *
+ * This function is used to write memory that has been mapped to the
+ * GPU and the known addresses are not physical addresses but instead
+ * bus addresses (e.g., what you'd put in an IB or ring buffer).
+ */
+static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ struct iommu_domain *dom;
+ ssize_t result = 0;
+ int r;
-#endif
+ dom = iommu_get_domain_for_dev(adev->dev);
-#endif
+ while (size) {
+ phys_addr_t addr = *pos & PAGE_MASK;
+ loff_t off = *pos & ~PAGE_MASK;
+ size_t bytes = PAGE_SIZE - off;
+ unsigned long pfn;
+ struct page *p;
+ void *ptr;
-static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
- unsigned count;
-
- struct drm_minor *minor = adev->ddev->primary;
- struct dentry *ent, *root = minor->debugfs_root;
-
- ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root,
- adev, &amdgpu_ttm_vram_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- i_size_write(ent->d_inode, adev->mc.mc_vram_size);
- adev->mman.vram = ent;
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
- adev, &amdgpu_ttm_gtt_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- i_size_write(ent->d_inode, adev->mc.gtt_size);
- adev->mman.gtt = ent;
+ bytes = min(bytes, size);
-#endif
- count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
+ addr = dom ? iommu_iova_to_phys(dom, addr) : addr;
-#ifdef CONFIG_SWIOTLB
- if (!swiotlb_nr_tbl())
- --count;
-#endif
+ pfn = addr >> PAGE_SHIFT;
+ if (!pfn_valid(pfn))
+ return -EPERM;
- return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
-#else
+ p = pfn_to_page(pfn);
+ if (p->mapping != adev->mman.bdev.dev_mapping)
+ return -EPERM;
- return 0;
-#endif
-}
+ ptr = kmap_local_page(p);
+ r = copy_from_user(ptr + off, buf, bytes);
+ kunmap_local(ptr);
+ if (r)
+ return -EFAULT;
-static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
+ size -= bytes;
+ *pos += bytes;
+ result += bytes;
+ }
+
+ return result;
+}
- debugfs_remove(adev->mman.vram);
- adev->mman.vram = NULL;
+static const struct file_operations amdgpu_ttm_iomem_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_iomem_read,
+ .write = amdgpu_iomem_write,
+ .llseek = default_llseek
+};
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- debugfs_remove(adev->mman.gtt);
- adev->mman.gtt = NULL;
#endif
+void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
+ &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
+ debugfs_create_file("amdgpu_iomem", 0444, root, adev,
+ &amdgpu_ttm_iomem_fops);
+ debugfs_create_file("ttm_page_pool", 0444, root, adev,
+ &amdgpu_ttm_page_pool_fops);
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
+ TTM_PL_VRAM),
+ root, "amdgpu_vram_mm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
+ TTM_PL_TT),
+ root, "amdgpu_gtt_mm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
+ AMDGPU_PL_GDS),
+ root, "amdgpu_gds_mm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
+ AMDGPU_PL_GWS),
+ root, "amdgpu_gws_mm");
+ ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
+ AMDGPU_PL_OA),
+ root, "amdgpu_oa_mm");
+
#endif
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 6bdede8ff12b..0be2728aa872 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -24,56 +24,214 @@
#ifndef __AMDGPU_TTM_H__
#define __AMDGPU_TTM_H__
-#include "gpu_scheduler.h"
+#include <linux/dma-direction.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/ttm/ttm_placement.h>
+#include "amdgpu_vram_mgr.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
#define AMDGPU_PL_OA (TTM_PL_PRIV + 2)
+#define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3)
+#define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4)
+#define AMDGPU_PL_MMIO_REMAP (TTM_PL_PRIV + 5)
+#define __AMDGPU_PL_NUM (TTM_PL_PRIV + 6)
-#define AMDGPU_PL_FLAG_GDS (TTM_PL_FLAG_PRIV << 0)
-#define AMDGPU_PL_FLAG_GWS (TTM_PL_FLAG_PRIV << 1)
-#define AMDGPU_PL_FLAG_OA (TTM_PL_FLAG_PRIV << 2)
+#define AMDGPU_GTT_MAX_TRANSFER_SIZE 512
+#define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
+
+extern const struct attribute_group amdgpu_vram_mgr_attr_group;
+extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
+
+struct hmm_range;
+
+struct amdgpu_gtt_mgr {
+ struct ttm_resource_manager manager;
+ struct drm_mm mm;
+ spinlock_t lock;
+};
struct amdgpu_mman {
- struct ttm_bo_global_ref bo_global_ref;
- struct drm_global_reference mem_global_ref;
- struct ttm_bo_device bdev;
- bool mem_global_referenced;
+ struct ttm_device bdev;
+ struct ttm_pool *ttm_pools;
bool initialized;
-
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *vram;
- struct dentry *gtt;
-#endif
+ void __iomem *aper_base_kaddr;
/* buffer handling */
const struct amdgpu_buffer_funcs *buffer_funcs;
struct amdgpu_ring *buffer_funcs_ring;
- /* Scheduler entity for buffer moves */
- struct amd_sched_entity entity;
+ bool buffer_funcs_enabled;
+
+ struct mutex gtt_window_lock;
+ /* High priority scheduler entity for buffer moves */
+ struct drm_sched_entity high_pr;
+ /* Low priority scheduler entity for VRAM clearing */
+ struct drm_sched_entity low_pr;
+
+ struct amdgpu_vram_mgr vram_mgr;
+ struct amdgpu_gtt_mgr gtt_mgr;
+ struct ttm_resource_manager preempt_mgr;
+
+ uint64_t stolen_vga_size;
+ struct amdgpu_bo *stolen_vga_memory;
+ uint64_t stolen_extended_size;
+ struct amdgpu_bo *stolen_extended_memory;
+ bool keep_stolen_vga_memory;
+
+ struct amdgpu_bo *stolen_reserved_memory;
+ uint64_t stolen_reserved_offset;
+ uint64_t stolen_reserved_size;
+
+ /* discovery */
+ uint8_t *discovery_bin;
+ uint32_t discovery_tmr_size;
+ /* fw reserved memory */
+ struct amdgpu_bo *fw_reserved_memory;
+ struct amdgpu_bo *fw_reserved_memory_extend;
+
+ /* firmware VRAM reservation */
+ u64 fw_vram_usage_start_offset;
+ u64 fw_vram_usage_size;
+ struct amdgpu_bo *fw_vram_usage_reserved_bo;
+ void *fw_vram_usage_va;
+
+ /* driver VRAM reservation */
+ u64 drv_vram_usage_start_offset;
+ u64 drv_vram_usage_size;
+ struct amdgpu_bo *drv_vram_usage_reserved_bo;
+ void *drv_vram_usage_va;
+
+ /* PAGE_SIZE'd BO for process memory r/w over SDMA. */
+ struct amdgpu_bo *sdma_access_bo;
+ void *sdma_access_ptr;
+};
+
+struct amdgpu_copy_mem {
+ struct ttm_buffer_object *bo;
+ struct ttm_resource *mem;
+ unsigned long offset;
};
-extern const struct ttm_mem_type_manager_func amdgpu_gtt_mgr_func;
-extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
+#define AMDGPU_COPY_FLAGS_TMZ (1 << 0)
+#define AMDGPU_COPY_FLAGS_READ_DECOMPRESSED (1 << 1)
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESSED (1 << 2)
+#define AMDGPU_COPY_FLAGS_MAX_COMPRESSED_SHIFT 3
+#define AMDGPU_COPY_FLAGS_MAX_COMPRESSED_MASK 0x03
+#define AMDGPU_COPY_FLAGS_NUMBER_TYPE_SHIFT 5
+#define AMDGPU_COPY_FLAGS_NUMBER_TYPE_MASK 0x07
+#define AMDGPU_COPY_FLAGS_DATA_FORMAT_SHIFT 8
+#define AMDGPU_COPY_FLAGS_DATA_FORMAT_MASK 0x3f
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_SHIFT 14
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_MASK 0x1
+
+#define AMDGPU_COPY_FLAGS_SET(field, value) \
+ (((__u32)(value) & AMDGPU_COPY_FLAGS_##field##_MASK) << AMDGPU_COPY_FLAGS_##field##_SHIFT)
+#define AMDGPU_COPY_FLAGS_GET(value, field) \
+ (((__u32)(value) >> AMDGPU_COPY_FLAGS_##field##_SHIFT) & AMDGPU_COPY_FLAGS_##field##_MASK)
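+
+/*
+ * Example (hypothetical field values): pack a compressed-write request with
+ * number type 2 and data format 0x1f, then read the fields back.
+ *
+ *	uint32_t copy_flags = AMDGPU_COPY_FLAGS_WRITE_COMPRESSED |
+ *			      AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, 2) |
+ *			      AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, 0x1f);
+ *
+ *	AMDGPU_COPY_FLAGS_GET(copy_flags, NUMBER_TYPE);	// == 2
+ *	AMDGPU_COPY_FLAGS_GET(copy_flags, DATA_FORMAT);	// == 0x1f
+ */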
+
+int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size);
+void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev);
+int amdgpu_preempt_mgr_init(struct amdgpu_device *adev);
+void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev);
+int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
+void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
+
+bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
+void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);
-int amdgpu_gtt_mgr_alloc(struct ttm_mem_type_manager *man,
- struct ttm_buffer_object *tbo,
- const struct ttm_place *place,
- struct ttm_mem_reg *mem);
+uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);
-int amdgpu_copy_buffer(struct amdgpu_ring *ring,
- uint64_t src_offset,
- uint64_t dst_offset,
- uint32_t byte_count,
- struct reservation_object *resv,
- struct dma_fence **fence, bool direct_submit);
+u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+ struct ttm_resource *mem,
+ u64 offset, u64 size,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table **sgt);
+void amdgpu_vram_mgr_free_sgt(struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table *sgt);
+uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr);
+int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
+ uint64_t start, uint64_t size);
+int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
+ uint64_t start);
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev);
+
+bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
+ struct ttm_resource *res);
+
+int amdgpu_ttm_init(struct amdgpu_device *adev);
+void amdgpu_ttm_fini(struct amdgpu_device *adev);
+void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
+ bool enable);
+int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
+ uint64_t dst_offset, uint32_t byte_count,
+ struct dma_resv *resv,
+ struct dma_fence **fence, bool direct_submit,
+ bool vm_needs_flush, uint32_t copy_flags);
+int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
+ const struct amdgpu_copy_mem *src,
+ const struct amdgpu_copy_mem *dst,
+ uint64_t size, bool tmz,
+ struct dma_resv *resv,
+ struct dma_fence **f);
+int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
+ struct dma_resv *resv,
+ struct dma_fence **fence);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t src_data,
- struct reservation_object *resv,
- struct dma_fence **fence);
+ struct dma_resv *resv,
+ struct dma_fence **fence,
+ bool delayed,
+ u64 k_job_id);
+
+int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
+void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
+uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
+
+#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
+ struct hmm_range **range);
+void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+ struct hmm_range *range);
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+ struct hmm_range *range);
+#else
+static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
+ struct hmm_range **range)
+{
+ return -EPERM;
+}
+static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm,
+ struct hmm_range *range)
+{
+}
+static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+ struct hmm_range *range)
+{
+ return false;
+}
+#endif
+
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct hmm_range *range);
+int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo,
+ uint64_t *user_addr);
+int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
+ uint64_t addr, uint32_t flags);
+bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+ unsigned long end, unsigned long *userptr);
+bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+ int *last_invalidated);
+bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
+uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem);
+uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+ struct ttm_resource *mem);
+int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type);
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
-bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
-int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
+void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
#endif
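
As a quick illustration of the AMDGPU_COPY_FLAGS_SET()/GET() helpers added above, here is a minimal sketch of packing and unpacking the copy_flags word that the new copy_flags parameter of amdgpu_copy_buffer() expects; the field values are invented for the example and are not taken from this patch:

	static uint32_t example_build_copy_flags(void)
	{
		uint32_t copy_flags = 0;

		/* single-bit flags */
		copy_flags |= AMDGPU_COPY_FLAGS_TMZ;
		copy_flags |= AMDGPU_COPY_FLAGS_READ_DECOMPRESSED;

		/* multi-bit fields: the value is masked, then shifted into place */
		copy_flags |= AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, 2);	/* bits 3-4  */
		copy_flags |= AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, 1);	/* bits 5-7  */
		copy_flags |= AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, 0x25);	/* bits 8-13 */

		/* reading a field back out */
		WARN_ON(AMDGPU_COPY_FLAGS_GET(copy_flags, DATA_FORMAT) != 0x25);

		return copy_flags;
	}
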
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index dfd1c98efa7c..e96f24e9ad57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -24,10 +24,17 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+
#include "amdgpu.h"
#include "amdgpu_ucode.h"
+#define AMDGPU_UCODE_NAME_MAX (128)
+
+static const struct kicker_device kicker_device_list[] = {
+ {0x744B, 0x00},
+ {0x7551, 0xC8}
+};
+
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
@@ -68,15 +75,32 @@ void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
{
uint16_t version_major = le16_to_cpu(hdr->header_version_major);
uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+ const struct smc_firmware_header_v1_0 *v1_0_hdr;
+ const struct smc_firmware_header_v2_0 *v2_0_hdr;
+ const struct smc_firmware_header_v2_1 *v2_1_hdr;
DRM_DEBUG("SMC\n");
amdgpu_ucode_print_common_hdr(hdr);
if (version_major == 1) {
- const struct smc_firmware_header_v1_0 *smc_hdr =
- container_of(hdr, struct smc_firmware_header_v1_0, header);
+ v1_0_hdr = container_of(hdr, struct smc_firmware_header_v1_0, header);
+ DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(v1_0_hdr->ucode_start_addr));
+ } else if (version_major == 2) {
+ switch (version_minor) {
+ case 0:
+ v2_0_hdr = container_of(hdr, struct smc_firmware_header_v2_0, v1_0.header);
+ DRM_DEBUG("ppt_offset_bytes: %u\n", le32_to_cpu(v2_0_hdr->ppt_offset_bytes));
+ DRM_DEBUG("ppt_size_bytes: %u\n", le32_to_cpu(v2_0_hdr->ppt_size_bytes));
+ break;
+ case 1:
+ v2_1_hdr = container_of(hdr, struct smc_firmware_header_v2_1, v1_0.header);
+ DRM_DEBUG("pptable_count: %u\n", le32_to_cpu(v2_1_hdr->pptable_count));
+ DRM_DEBUG("pptable_entry_offset: %u\n", le32_to_cpu(v2_1_hdr->pptable_entry_offset));
+ break;
+ default:
+ break;
+ }
- DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
} else {
DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
}
@@ -98,6 +122,12 @@ void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
le32_to_cpu(gfx_hdr->ucode_feature_version));
DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
+ } else if (version_major == 2) {
+ const struct gfx_firmware_header_v2_0 *gfx_hdr =
+ container_of(hdr, struct gfx_firmware_header_v2_0, header);
+
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(gfx_hdr->ucode_feature_version));
} else {
DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
}
@@ -128,41 +158,139 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
} else if (version_major == 2) {
const struct rlc_firmware_header_v2_0 *rlc_hdr =
container_of(hdr, struct rlc_firmware_header_v2_0, header);
+ const struct rlc_firmware_header_v2_1 *rlc_hdr_v2_1 =
+ container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
+ const struct rlc_firmware_header_v2_2 *rlc_hdr_v2_2 =
+ container_of(rlc_hdr_v2_1, struct rlc_firmware_header_v2_2, v2_1);
+ const struct rlc_firmware_header_v2_3 *rlc_hdr_v2_3 =
+ container_of(rlc_hdr_v2_2, struct rlc_firmware_header_v2_3, v2_2);
+ const struct rlc_firmware_header_v2_4 *rlc_hdr_v2_4 =
+ container_of(rlc_hdr_v2_3, struct rlc_firmware_header_v2_4, v2_3);
- DRM_DEBUG("ucode_feature_version: %u\n",
- le32_to_cpu(rlc_hdr->ucode_feature_version));
- DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
- DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
- DRM_DEBUG("save_and_restore_offset: %u\n",
- le32_to_cpu(rlc_hdr->save_and_restore_offset));
- DRM_DEBUG("clear_state_descriptor_offset: %u\n",
- le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
- DRM_DEBUG("avail_scratch_ram_locations: %u\n",
- le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
- DRM_DEBUG("reg_restore_list_size: %u\n",
- le32_to_cpu(rlc_hdr->reg_restore_list_size));
- DRM_DEBUG("reg_list_format_start: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_start));
- DRM_DEBUG("reg_list_format_separate_start: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
- DRM_DEBUG("starting_offsets_start: %u\n",
- le32_to_cpu(rlc_hdr->starting_offsets_start));
- DRM_DEBUG("reg_list_format_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
- DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
- DRM_DEBUG("reg_list_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_size_bytes));
- DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
- DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
- DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
- DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
- DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
- le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
+ switch (version_minor) {
+ case 0:
+ /* rlc_hdr v2_0 */
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr->ucode_feature_version));
+ DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
+ DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
+ DRM_DEBUG("save_and_restore_offset: %u\n",
+ le32_to_cpu(rlc_hdr->save_and_restore_offset));
+ DRM_DEBUG("clear_state_descriptor_offset: %u\n",
+ le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
+ DRM_DEBUG("avail_scratch_ram_locations: %u\n",
+ le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
+ DRM_DEBUG("reg_restore_list_size: %u\n",
+ le32_to_cpu(rlc_hdr->reg_restore_list_size));
+ DRM_DEBUG("reg_list_format_start: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_start));
+ DRM_DEBUG("reg_list_format_separate_start: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
+ DRM_DEBUG("starting_offsets_start: %u\n",
+ le32_to_cpu(rlc_hdr->starting_offsets_start));
+ DRM_DEBUG("reg_list_format_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
+ DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
+ DRM_DEBUG("reg_list_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_size_bytes));
+ DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
+ DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
+ DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
+ DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
+ DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
+ break;
+ case 1:
+ /* rlc_hdr v2_1 */
+ DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n",
+ le32_to_cpu(rlc_hdr_v2_1->reg_list_format_direct_reg_list_length));
+ DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n",
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_ucode_ver));
+ DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n",
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_feature_ver));
+ DRM_DEBUG("save_restore_list_cntl_size_bytes %u\n",
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_size_bytes));
+ DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_offset_bytes));
+ DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n",
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_ucode_ver));
+ DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n",
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_feature_ver));
+ DRM_DEBUG("save_restore_list_gpm_size_bytes %u\n",
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_size_bytes));
+ DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_offset_bytes));
+ DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n",
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_ucode_ver));
+ DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n",
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_feature_ver));
+ DRM_DEBUG("save_restore_list_srm_size_bytes %u\n",
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_size_bytes));
+ DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_offset_bytes));
+ break;
+ case 2:
+ /* rlc_hdr v2_2 */
+ DRM_DEBUG("rlc_iram_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_iram_ucode_size_bytes));
+ DRM_DEBUG("rlc_iram_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_iram_ucode_offset_bytes));
+ DRM_DEBUG("rlc_dram_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_dram_ucode_size_bytes));
+ DRM_DEBUG("rlc_dram_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_2->rlc_dram_ucode_offset_bytes));
+ break;
+ case 3:
+ /* rlc_hdr v2_3 */
+ DRM_DEBUG("rlcp_ucode_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_version));
+ DRM_DEBUG("rlcp_ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_feature_version));
+ DRM_DEBUG("rlcp_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_size_bytes));
+ DRM_DEBUG("rlcp_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_offset_bytes));
+ DRM_DEBUG("rlcv_ucode_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_version));
+ DRM_DEBUG("rlcv_ucode_feature_version: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_feature_version));
+ DRM_DEBUG("rlcv_ucode_size_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_size_bytes));
+ DRM_DEBUG("rlcv_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_offset_bytes));
+ break;
+ case 4:
+ /* rlc_hdr v2_4 */
+ DRM_DEBUG("global_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->global_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("global_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->global_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se0_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se0_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se0_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se0_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se1_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se1_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se1_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se1_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se2_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se2_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se2_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se2_tap_delays_ucode_offset_bytes));
+ DRM_DEBUG("se3_tap_delays_ucode_size_bytes :%u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se3_tap_delays_ucode_size_bytes));
+ DRM_DEBUG("se3_tap_delays_ucode_offset_bytes: %u\n",
+ le32_to_cpu(rlc_hdr_v2_4->se3_tap_delays_ucode_offset_bytes));
+ break;
+ default:
+ DRM_ERROR("Unknown RLC v2 ucode: v2.%u\n", version_minor);
+ break;
+ }
} else {
DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
}
@@ -191,13 +319,198 @@ void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
container_of(sdma_hdr, struct sdma_firmware_header_v1_1, v1_0);
DRM_DEBUG("digest_size: %u\n", le32_to_cpu(sdma_v1_1_hdr->digest_size));
}
+ } else if (version_major == 2) {
+ const struct sdma_firmware_header_v2_0 *sdma_hdr =
+ container_of(hdr, struct sdma_firmware_header_v2_0, header);
+
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(sdma_hdr->ucode_feature_version));
+ DRM_DEBUG("ctx_jt_offset: %u\n", le32_to_cpu(sdma_hdr->ctx_jt_offset));
+ DRM_DEBUG("ctx_jt_size: %u\n", le32_to_cpu(sdma_hdr->ctx_jt_size));
+ DRM_DEBUG("ctl_ucode_offset: %u\n", le32_to_cpu(sdma_hdr->ctl_ucode_offset));
+ DRM_DEBUG("ctl_jt_offset: %u\n", le32_to_cpu(sdma_hdr->ctl_jt_offset));
+ DRM_DEBUG("ctl_jt_size: %u\n", le32_to_cpu(sdma_hdr->ctl_jt_size));
+ } else if (version_major == 3) {
+ const struct sdma_firmware_header_v3_0 *sdma_hdr =
+ container_of(hdr, struct sdma_firmware_header_v3_0, header);
+
+ DRM_DEBUG("ucode_reversion: %u\n",
+ le32_to_cpu(sdma_hdr->ucode_feature_version));
} else {
DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
version_major, version_minor);
}
}
-int amdgpu_ucode_validate(const struct firmware *fw)
+void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+ uint32_t fw_index;
+ const struct psp_fw_bin_desc *desc;
+
+ DRM_DEBUG("PSP\n");
+ amdgpu_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct psp_firmware_header_v1_0 *psp_hdr =
+ container_of(hdr, struct psp_firmware_header_v1_0, header);
+
+ DRM_DEBUG("ucode_feature_version: %u\n",
+ le32_to_cpu(psp_hdr->sos.fw_version));
+ DRM_DEBUG("sos_offset_bytes: %u\n",
+ le32_to_cpu(psp_hdr->sos.offset_bytes));
+ DRM_DEBUG("sos_size_bytes: %u\n",
+ le32_to_cpu(psp_hdr->sos.size_bytes));
+ if (version_minor == 1) {
+ const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 =
+ container_of(psp_hdr, struct psp_firmware_header_v1_1, v1_0);
+ DRM_DEBUG("toc_header_version: %u\n",
+ le32_to_cpu(psp_hdr_v1_1->toc.fw_version));
+ DRM_DEBUG("toc_offset_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_1->toc.offset_bytes));
+ DRM_DEBUG("toc_size_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_1->toc.size_bytes));
+ DRM_DEBUG("kdb_header_version: %u\n",
+ le32_to_cpu(psp_hdr_v1_1->kdb.fw_version));
+ DRM_DEBUG("kdb_offset_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_1->kdb.offset_bytes));
+ DRM_DEBUG("kdb_size_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_1->kdb.size_bytes));
+ }
+ if (version_minor == 2) {
+ const struct psp_firmware_header_v1_2 *psp_hdr_v1_2 =
+ container_of(psp_hdr, struct psp_firmware_header_v1_2, v1_0);
+ DRM_DEBUG("kdb_header_version: %u\n",
+ le32_to_cpu(psp_hdr_v1_2->kdb.fw_version));
+ DRM_DEBUG("kdb_offset_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_2->kdb.offset_bytes));
+ DRM_DEBUG("kdb_size_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_2->kdb.size_bytes));
+ }
+ if (version_minor == 3) {
+ const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 =
+ container_of(psp_hdr, struct psp_firmware_header_v1_1, v1_0);
+ const struct psp_firmware_header_v1_3 *psp_hdr_v1_3 =
+ container_of(psp_hdr_v1_1, struct psp_firmware_header_v1_3, v1_1);
+ DRM_DEBUG("toc_header_version: %u\n",
+ le32_to_cpu(psp_hdr_v1_3->v1_1.toc.fw_version));
+ DRM_DEBUG("toc_offset_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_3->v1_1.toc.offset_bytes));
+ DRM_DEBUG("toc_size_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_3->v1_1.toc.size_bytes));
+ DRM_DEBUG("kdb_header_version: %u\n",
+ le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.fw_version));
+ DRM_DEBUG("kdb_offset_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.offset_bytes));
+ DRM_DEBUG("kdb_size_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.size_bytes));
+ DRM_DEBUG("spl_header_version: %u\n",
+ le32_to_cpu(psp_hdr_v1_3->spl.fw_version));
+ DRM_DEBUG("spl_offset_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_3->spl.offset_bytes));
+ DRM_DEBUG("spl_size_bytes: %u\n",
+ le32_to_cpu(psp_hdr_v1_3->spl.size_bytes));
+ }
+ } else if (version_major == 2) {
+ const struct psp_firmware_header_v2_0 *psp_hdr_v2_0 =
+ container_of(hdr, struct psp_firmware_header_v2_0, header);
+ for (fw_index = 0; fw_index < le32_to_cpu(psp_hdr_v2_0->psp_fw_bin_count); fw_index++) {
+ desc = &(psp_hdr_v2_0->psp_fw_bin[fw_index]);
+ switch (desc->fw_type) {
+ case PSP_FW_TYPE_PSP_SOS:
+ DRM_DEBUG("psp_sos_version: %u\n",
+ le32_to_cpu(desc->fw_version));
+ DRM_DEBUG("psp_sos_size_bytes: %u\n",
+ le32_to_cpu(desc->size_bytes));
+ break;
+ case PSP_FW_TYPE_PSP_SYS_DRV:
+ DRM_DEBUG("psp_sys_drv_version: %u\n",
+ le32_to_cpu(desc->fw_version));
+ DRM_DEBUG("psp_sys_drv_size_bytes: %u\n",
+ le32_to_cpu(desc->size_bytes));
+ break;
+ case PSP_FW_TYPE_PSP_KDB:
+ DRM_DEBUG("psp_kdb_version: %u\n",
+ le32_to_cpu(desc->fw_version));
+ DRM_DEBUG("psp_kdb_size_bytes: %u\n",
+ le32_to_cpu(desc->size_bytes));
+ break;
+ case PSP_FW_TYPE_PSP_TOC:
+ DRM_DEBUG("psp_toc_version: %u\n",
+ le32_to_cpu(desc->fw_version));
+ DRM_DEBUG("psp_toc_size_bytes: %u\n",
+ le32_to_cpu(desc->size_bytes));
+ break;
+ case PSP_FW_TYPE_PSP_SPL:
+ DRM_DEBUG("psp_spl_version: %u\n",
+ le32_to_cpu(desc->fw_version));
+ DRM_DEBUG("psp_spl_size_bytes: %u\n",
+ le32_to_cpu(desc->size_bytes));
+ break;
+ case PSP_FW_TYPE_PSP_RL:
+ DRM_DEBUG("psp_rl_version: %u\n",
+ le32_to_cpu(desc->fw_version));
+ DRM_DEBUG("psp_rl_size_bytes: %u\n",
+ le32_to_cpu(desc->size_bytes));
+ break;
+ case PSP_FW_TYPE_PSP_SOC_DRV:
+ DRM_DEBUG("psp_soc_drv_version: %u\n",
+ le32_to_cpu(desc->fw_version));
+ DRM_DEBUG("psp_soc_drv_size_bytes: %u\n",
+ le32_to_cpu(desc->size_bytes));
+ break;
+ case PSP_FW_TYPE_PSP_INTF_DRV:
+ DRM_DEBUG("psp_intf_drv_version: %u\n",
+ le32_to_cpu(desc->fw_version));
+ DRM_DEBUG("psp_intf_drv_size_bytes: %u\n",
+ le32_to_cpu(desc->size_bytes));
+ break;
+ case PSP_FW_TYPE_PSP_DBG_DRV:
+ DRM_DEBUG("psp_dbg_drv_version: %u\n",
+ le32_to_cpu(desc->fw_version));
+ DRM_DEBUG("psp_dbg_drv_size_bytes: %u\n",
+ le32_to_cpu(desc->size_bytes));
+ break;
+ case PSP_FW_TYPE_PSP_RAS_DRV:
+ DRM_DEBUG("psp_ras_drv_version: %u\n",
+ le32_to_cpu(desc->fw_version));
+ DRM_DEBUG("psp_ras_drv_size_bytes: %u\n",
+ le32_to_cpu(desc->size_bytes));
+ break;
+ default:
+ DRM_DEBUG("Unsupported PSP fw type: %d\n", desc->fw_type);
+ break;
+ }
+ }
+ } else {
+ DRM_ERROR("Unknown PSP ucode version: %u.%u\n",
+ version_major, version_minor);
+ }
+}
+
+void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr)
+{
+ uint16_t version_major = le16_to_cpu(hdr->header_version_major);
+ uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
+
+ DRM_DEBUG("GPU_INFO\n");
+ amdgpu_ucode_print_common_hdr(hdr);
+
+ if (version_major == 1) {
+ const struct gpu_info_firmware_header_v1_0 *gpu_info_hdr =
+ container_of(hdr, struct gpu_info_firmware_header_v1_0, header);
+
+ DRM_DEBUG("version_major: %u\n",
+ le16_to_cpu(gpu_info_hdr->version_major));
+ DRM_DEBUG("version_minor: %u\n",
+ le16_to_cpu(gpu_info_hdr->version_minor));
+ } else {
+ DRM_ERROR("Unknown gpu_info ucode version: %u.%u\n", version_major, version_minor);
+ }
+}
+
+static int amdgpu_ucode_validate(const struct firmware *fw)
{
const struct common_firmware_header *hdr =
(const struct common_firmware_header *)fw->data;
@@ -213,8 +526,8 @@ bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
{
if ((hdr->common.header_version_major == hdr_major) &&
(hdr->common.header_version_minor == hdr_minor))
- return false;
- return true;
+ return true;
+ return false;
}
enum amdgpu_firmware_load_type
@@ -226,6 +539,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
+ case CHIP_HAINAN:
return AMDGPU_FW_LOAD_DIRECT;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
@@ -244,20 +558,268 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
- if (!load_type)
+ case CHIP_VEGAM:
+ return AMDGPU_FW_LOAD_SMU;
+ case CHIP_CYAN_SKILLFISH:
+ if (!(load_type &&
+ adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2))
return AMDGPU_FW_LOAD_DIRECT;
else
- return AMDGPU_FW_LOAD_SMU;
- case CHIP_VEGA10:
+ return AMDGPU_FW_LOAD_PSP;
+ default:
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
+ else if (load_type == 3)
+ return AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO;
else
return AMDGPU_FW_LOAD_PSP;
+ }
+}
+
+const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
+{
+ switch (ucode_id) {
+ case AMDGPU_UCODE_ID_SDMA0:
+ return "SDMA0";
+ case AMDGPU_UCODE_ID_SDMA1:
+ return "SDMA1";
+ case AMDGPU_UCODE_ID_SDMA2:
+ return "SDMA2";
+ case AMDGPU_UCODE_ID_SDMA3:
+ return "SDMA3";
+ case AMDGPU_UCODE_ID_SDMA4:
+ return "SDMA4";
+ case AMDGPU_UCODE_ID_SDMA5:
+ return "SDMA5";
+ case AMDGPU_UCODE_ID_SDMA6:
+ return "SDMA6";
+ case AMDGPU_UCODE_ID_SDMA7:
+ return "SDMA7";
+ case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
+ return "SDMA_CTX";
+ case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
+ return "SDMA_CTL";
+ case AMDGPU_UCODE_ID_CP_CE:
+ return "CP_CE";
+ case AMDGPU_UCODE_ID_CP_PFP:
+ return "CP_PFP";
+ case AMDGPU_UCODE_ID_CP_ME:
+ return "CP_ME";
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ return "CP_MEC1";
+ case AMDGPU_UCODE_ID_CP_MEC1_JT:
+ return "CP_MEC1_JT";
+ case AMDGPU_UCODE_ID_CP_MEC2:
+ return "CP_MEC2";
+ case AMDGPU_UCODE_ID_CP_MEC2_JT:
+ return "CP_MEC2_JT";
+ case AMDGPU_UCODE_ID_CP_MES:
+ return "CP_MES";
+ case AMDGPU_UCODE_ID_CP_MES_DATA:
+ return "CP_MES_DATA";
+ case AMDGPU_UCODE_ID_CP_MES1:
+ return "CP_MES_KIQ";
+ case AMDGPU_UCODE_ID_CP_MES1_DATA:
+ return "CP_MES_KIQ_DATA";
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
+ return "RLC_RESTORE_LIST_CNTL";
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
+ return "RLC_RESTORE_LIST_GPM_MEM";
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+ return "RLC_RESTORE_LIST_SRM_MEM";
+ case AMDGPU_UCODE_ID_RLC_IRAM:
+ return "RLC_IRAM";
+ case AMDGPU_UCODE_ID_RLC_DRAM:
+ return "RLC_DRAM";
+ case AMDGPU_UCODE_ID_RLC_G:
+ return "RLC_G";
+ case AMDGPU_UCODE_ID_RLC_P:
+ return "RLC_P";
+ case AMDGPU_UCODE_ID_RLC_V:
+ return "RLC_V";
+ case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
+ return "GLOBAL_TAP_DELAYS";
+ case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
+ return "SE0_TAP_DELAYS";
+ case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
+ return "SE1_TAP_DELAYS";
+ case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
+ return "SE2_TAP_DELAYS";
+ case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
+ return "SE3_TAP_DELAYS";
+ case AMDGPU_UCODE_ID_IMU_I:
+ return "IMU_I";
+ case AMDGPU_UCODE_ID_IMU_D:
+ return "IMU_D";
+ case AMDGPU_UCODE_ID_STORAGE:
+ return "STORAGE";
+ case AMDGPU_UCODE_ID_SMC:
+ return "SMC";
+ case AMDGPU_UCODE_ID_PPTABLE:
+ return "PPTABLE";
+ case AMDGPU_UCODE_ID_P2S_TABLE:
+ return "P2STABLE";
+ case AMDGPU_UCODE_ID_UVD:
+ return "UVD";
+ case AMDGPU_UCODE_ID_UVD1:
+ return "UVD1";
+ case AMDGPU_UCODE_ID_VCE:
+ return "VCE";
+ case AMDGPU_UCODE_ID_VCN:
+ return "VCN";
+ case AMDGPU_UCODE_ID_VCN1:
+ return "VCN1";
+ case AMDGPU_UCODE_ID_DMCU_ERAM:
+ return "DMCU_ERAM";
+ case AMDGPU_UCODE_ID_DMCU_INTV:
+ return "DMCU_INTV";
+ case AMDGPU_UCODE_ID_VCN0_RAM:
+ return "VCN0_RAM";
+ case AMDGPU_UCODE_ID_VCN1_RAM:
+ return "VCN1_RAM";
+ case AMDGPU_UCODE_ID_DMCUB:
+ return "DMCUB";
+ case AMDGPU_UCODE_ID_CAP:
+ return "CAP";
+ case AMDGPU_UCODE_ID_VPE_CTX:
+ return "VPE_CTX";
+ case AMDGPU_UCODE_ID_VPE_CTL:
+ return "VPE_CTL";
+ case AMDGPU_UCODE_ID_VPE:
+ return "VPE";
+ case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
+ return "UMSCH_MM_UCODE";
+ case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
+ return "UMSCH_MM_DATA";
+ case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
+ return "UMSCH_MM_CMD_BUFFER";
+ case AMDGPU_UCODE_ID_JPEG_RAM:
+ return "JPEG";
+ case AMDGPU_UCODE_ID_SDMA_RS64:
+ return "RS64_SDMA";
+ case AMDGPU_UCODE_ID_CP_RS64_PFP:
+ return "RS64_PFP";
+ case AMDGPU_UCODE_ID_CP_RS64_ME:
+ return "RS64_ME";
+ case AMDGPU_UCODE_ID_CP_RS64_MEC:
+ return "RS64_MEC";
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
+ return "RS64_PFP_P0_STACK";
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
+ return "RS64_PFP_P1_STACK";
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
+ return "RS64_ME_P0_STACK";
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
+ return "RS64_ME_P1_STACK";
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
+ return "RS64_MEC_P0_STACK";
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
+ return "RS64_MEC_P1_STACK";
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
+ return "RS64_MEC_P2_STACK";
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
+ return "RS64_MEC_P3_STACK";
+ case AMDGPU_UCODE_ID_ISP:
+ return "ISP";
default:
- DRM_ERROR("Unknow firmware load type\n");
+ return "UNKNOWN UCODE";
}
+}
+
+static inline int amdgpu_ucode_is_valid(uint32_t fw_version)
+{
+ if (!fw_version)
+ return -EINVAL;
- return AMDGPU_FW_LOAD_DIRECT;
+ return 0;
+}
+
+#define FW_VERSION_ATTR(name, mode, field) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct drm_device *ddev = dev_get_drvdata(dev); \
+ struct amdgpu_device *adev = drm_to_adev(ddev); \
+ \
+ if (!buf) \
+ return amdgpu_ucode_is_valid(adev->field); \
+ \
+ return sysfs_emit(buf, "0x%08x\n", adev->field); \
+} \
+static DEVICE_ATTR(name, mode, show_##name, NULL)
+
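
For readability, the first FW_VERSION_ATTR() instantiation below expands to roughly the following (a mechanical expansion shown for illustration only, not code added by the patch). The NULL-buffer probe is what lets the is_visible callback further down, amdgpu_ucode_sys_visible(), hide attributes whose firmware version is still zero:

	static ssize_t show_vce_fw_version(struct device *dev,
					   struct device_attribute *attr, char *buf)
	{
		struct drm_device *ddev = dev_get_drvdata(dev);
		struct amdgpu_device *adev = drm_to_adev(ddev);

		if (!buf)	/* visibility probe: -EINVAL when fw_version is 0 */
			return amdgpu_ucode_is_valid(adev->vce.fw_version);

		return sysfs_emit(buf, "0x%08x\n", adev->vce.fw_version);
	}
	static DEVICE_ATTR(vce_fw_version, 0444, show_vce_fw_version, NULL);
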
+FW_VERSION_ATTR(vce_fw_version, 0444, vce.fw_version);
+FW_VERSION_ATTR(uvd_fw_version, 0444, uvd.fw_version);
+FW_VERSION_ATTR(mc_fw_version, 0444, gmc.fw_version);
+FW_VERSION_ATTR(me_fw_version, 0444, gfx.me_fw_version);
+FW_VERSION_ATTR(pfp_fw_version, 0444, gfx.pfp_fw_version);
+FW_VERSION_ATTR(ce_fw_version, 0444, gfx.ce_fw_version);
+FW_VERSION_ATTR(rlc_fw_version, 0444, gfx.rlc_fw_version);
+FW_VERSION_ATTR(rlc_srlc_fw_version, 0444, gfx.rlc_srlc_fw_version);
+FW_VERSION_ATTR(rlc_srlg_fw_version, 0444, gfx.rlc_srlg_fw_version);
+FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
+FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
+FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
+FW_VERSION_ATTR(imu_fw_version, 0444, gfx.imu_fw_version);
+FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
+FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_context.bin_desc.fw_version);
+FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras_context.context.bin_desc.fw_version);
+FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi_context.context.bin_desc.fw_version);
+FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
+FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
+FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
+FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
+FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
+FW_VERSION_ATTR(dmcub_fw_version, 0444, dm.dmcub_fw_version);
+FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
+FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
+FW_VERSION_ATTR(pldm_fw_version, 0444, firmware.pldm_version);
+
+static struct attribute *fw_attrs[] = {
+ &dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
+ &dev_attr_mc_fw_version.attr, &dev_attr_me_fw_version.attr,
+ &dev_attr_pfp_fw_version.attr, &dev_attr_ce_fw_version.attr,
+ &dev_attr_rlc_fw_version.attr, &dev_attr_rlc_srlc_fw_version.attr,
+ &dev_attr_rlc_srlg_fw_version.attr, &dev_attr_rlc_srls_fw_version.attr,
+ &dev_attr_mec_fw_version.attr, &dev_attr_mec2_fw_version.attr,
+ &dev_attr_sos_fw_version.attr, &dev_attr_asd_fw_version.attr,
+ &dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
+ &dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
+ &dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
+ &dev_attr_dmcu_fw_version.attr, &dev_attr_dmcub_fw_version.attr,
+ &dev_attr_imu_fw_version.attr, &dev_attr_mes_fw_version.attr,
+ &dev_attr_mes_kiq_fw_version.attr, &dev_attr_pldm_fw_version.attr,
+ NULL
+};
+
+#define to_dev_attr(x) container_of(x, struct device_attribute, attr)
+
+static umode_t amdgpu_ucode_sys_visible(struct kobject *kobj,
+ struct attribute *attr, int idx)
+{
+ struct device_attribute *dev_attr = to_dev_attr(attr);
+ struct device *dev = kobj_to_dev(kobj);
+
+ if (dev_attr->show(dev, dev_attr, NULL) == -EINVAL)
+ return 0;
+
+ return attr->mode;
+}
+
+static const struct attribute_group fw_attr_group = {
+ .name = "fw_version",
+ .attrs = fw_attrs,
+ .is_visible = amdgpu_ucode_sys_visible
+};
+
+int amdgpu_ucode_sysfs_init(struct amdgpu_device *adev)
+{
+ return sysfs_create_group(&adev->dev->kobj, &fw_attr_group);
+}
+
+void amdgpu_ucode_sysfs_fini(struct amdgpu_device *adev)
+{
+ sysfs_remove_group(&adev->dev->kobj, &fw_attr_group);
}
static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
@@ -266,8 +828,18 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
{
const struct common_firmware_header *header = NULL;
const struct gfx_firmware_header_v1_0 *cp_hdr = NULL;
+ const struct gfx_firmware_header_v2_0 *cpv2_hdr = NULL;
+ const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL;
+ const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL;
+ const struct mes_firmware_header_v1_0 *mes_hdr = NULL;
+ const struct sdma_firmware_header_v2_0 *sdma_hdr = NULL;
+ const struct sdma_firmware_header_v3_0 *sdmav3_hdr = NULL;
+ const struct imu_firmware_header_v1_0 *imu_hdr = NULL;
+ const struct vpe_firmware_header_v1_0 *vpe_hdr = NULL;
+ const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr = NULL;
+ u8 *ucode_addr;
- if (NULL == ucode->fw)
+ if (!ucode->fw)
return 0;
ucode->mc_addr = mc_addr;
@@ -277,37 +849,241 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
return 0;
header = (const struct common_firmware_header *)ucode->fw->data;
-
cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+ cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)ucode->fw->data;
+ dmcu_hdr = (const struct dmcu_firmware_header_v1_0 *)ucode->fw->data;
+ dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data;
+ mes_hdr = (const struct mes_firmware_header_v1_0 *)ucode->fw->data;
+ sdma_hdr = (const struct sdma_firmware_header_v2_0 *)ucode->fw->data;
+ sdmav3_hdr = (const struct sdma_firmware_header_v3_0 *)ucode->fw->data;
+ imu_hdr = (const struct imu_firmware_header_v1_0 *)ucode->fw->data;
+ vpe_hdr = (const struct vpe_firmware_header_v1_0 *)ucode->fw->data;
+ umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)ucode->fw->data;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP ||
- (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
- ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 &&
- ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT &&
- ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT)) {
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ switch (ucode->ucode_id) {
+ case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
+ ucode->ucode_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
+ ucode->ucode_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(sdma_hdr->ctl_ucode_offset);
+ break;
+ case AMDGPU_UCODE_ID_SDMA_RS64:
+ ucode->ucode_size = le32_to_cpu(sdmav3_hdr->ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(sdmav3_hdr->header.ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ case AMDGPU_UCODE_ID_CP_MEC2:
+ ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
+ le32_to_cpu(cp_hdr->jt_size) * 4;
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1_JT:
+ case AMDGPU_UCODE_ID_CP_MEC2_JT:
+ ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4;
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes) +
+ le32_to_cpu(cp_hdr->jt_offset) * 4;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
+ ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
+ ucode_addr = adev->gfx.rlc.save_restore_list_cntl;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
+ ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes;
+ ucode_addr = adev->gfx.rlc.save_restore_list_gpm;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+ ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
+ ucode_addr = adev->gfx.rlc.save_restore_list_srm;
+ break;
+ case AMDGPU_UCODE_ID_RLC_IRAM:
+ ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes;
+ ucode_addr = adev->gfx.rlc.rlc_iram_ucode;
+ break;
+ case AMDGPU_UCODE_ID_RLC_DRAM:
+ ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes;
+ ucode_addr = adev->gfx.rlc.rlc_dram_ucode;
+ break;
+ case AMDGPU_UCODE_ID_RLC_P:
+ ucode->ucode_size = adev->gfx.rlc.rlcp_ucode_size_bytes;
+ ucode_addr = adev->gfx.rlc.rlcp_ucode;
+ break;
+ case AMDGPU_UCODE_ID_RLC_V:
+ ucode->ucode_size = adev->gfx.rlc.rlcv_ucode_size_bytes;
+ ucode_addr = adev->gfx.rlc.rlcv_ucode;
+ break;
+ case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
+ ucode->ucode_size = adev->gfx.rlc.global_tap_delays_ucode_size_bytes;
+ ucode_addr = adev->gfx.rlc.global_tap_delays_ucode;
+ break;
+ case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
+ ucode->ucode_size = adev->gfx.rlc.se0_tap_delays_ucode_size_bytes;
+ ucode_addr = adev->gfx.rlc.se0_tap_delays_ucode;
+ break;
+ case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
+ ucode->ucode_size = adev->gfx.rlc.se1_tap_delays_ucode_size_bytes;
+ ucode_addr = adev->gfx.rlc.se1_tap_delays_ucode;
+ break;
+ case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
+ ucode->ucode_size = adev->gfx.rlc.se2_tap_delays_ucode_size_bytes;
+ ucode_addr = adev->gfx.rlc.se2_tap_delays_ucode;
+ break;
+ case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
+ ucode->ucode_size = adev->gfx.rlc.se3_tap_delays_ucode_size_bytes;
+ ucode_addr = adev->gfx.rlc.se3_tap_delays_ucode;
+ break;
+ case AMDGPU_UCODE_ID_CP_MES:
+ ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(mes_hdr->mes_ucode_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_MES_DATA:
+ ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_MES1:
+ ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(mes_hdr->mes_ucode_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_MES1_DATA:
+ ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_DMCU_ERAM:
+ ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
+ le32_to_cpu(dmcu_hdr->intv_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_DMCU_INTV:
+ ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes) +
+ le32_to_cpu(dmcu_hdr->intv_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_DMCUB:
+ ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_PPTABLE:
+ ucode->ucode_size = ucode->fw->size;
+ ucode_addr = (u8 *)ucode->fw->data;
+ break;
+ case AMDGPU_UCODE_ID_P2S_TABLE:
+ ucode->ucode_size = ucode->fw->size;
+ ucode_addr = (u8 *)ucode->fw->data;
+ break;
+ case AMDGPU_UCODE_ID_IMU_I:
+ ucode->ucode_size = le32_to_cpu(imu_hdr->imu_iram_ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(imu_hdr->header.ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_IMU_D:
+ ucode->ucode_size = le32_to_cpu(imu_hdr->imu_dram_ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(imu_hdr->header.ucode_array_offset_bytes) +
+ le32_to_cpu(imu_hdr->imu_iram_ucode_size_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP:
+ ucode->ucode_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
+ ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(cpv2_hdr->data_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
+ ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(cpv2_hdr->data_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME:
+ ucode->ucode_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
+ ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(cpv2_hdr->data_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
+ ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(cpv2_hdr->data_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC:
+ ucode->ucode_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
+ ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(cpv2_hdr->data_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
+ ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(cpv2_hdr->data_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
+ ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(cpv2_hdr->data_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
+ ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(cpv2_hdr->data_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_VPE_CTX:
+ ucode->ucode_size = le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(vpe_hdr->header.ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_VPE_CTL:
+ ucode->ucode_size = le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(vpe_hdr->ctl_ucode_offset);
+ break;
+ case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
+ ucode->ucode_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(umsch_mm_hdr->header.ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
+ ucode->ucode_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_offset_bytes);
+ break;
+ default:
+ ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
+ break;
+ }
+ } else {
ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
-
- memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
- le32_to_cpu(header->ucode_array_offset_bytes)),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1 ||
- ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2) {
- ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
- le32_to_cpu(cp_hdr->jt_size) * 4;
-
- memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
- le32_to_cpu(header->ucode_array_offset_bytes)),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
- ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT) {
- ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4;
-
- memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
- le32_to_cpu(header->ucode_array_offset_bytes) +
- le32_to_cpu(cp_hdr->jt_offset) * 4),
- ucode->ucode_size);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
}
+ memcpy(ucode->kaddr, ucode_addr, ucode->ucode_size);
+
return 0;
}
@@ -316,10 +1092,10 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
{
const struct gfx_firmware_header_v1_0 *header = NULL;
const struct common_firmware_header *comm_hdr = NULL;
- uint8_t* src_addr = NULL;
- uint8_t* dst_addr = NULL;
+ uint8_t *src_addr = NULL;
+ uint8_t *dst_addr = NULL;
- if (NULL == ucode->fw)
+ if (!ucode->fw)
return 0;
comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
@@ -332,52 +1108,45 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
(le32_to_cpu(header->jt_offset) * 4);
memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
- ucode->ucode_size += le32_to_cpu(header->jt_size) * 4;
+ return 0;
+}
+int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
+{
+ if ((adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) &&
+ (adev->firmware.load_type != AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)) {
+ amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
+ (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
+ AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ &adev->firmware.fw_buf,
+ &adev->firmware.fw_buf_mc,
+ &adev->firmware.fw_buf_ptr);
+ if (!adev->firmware.fw_buf) {
+ dev_err(adev->dev, "failed to create kernel buffer for firmware.fw_buf\n");
+ return -ENOMEM;
+ } else if (amdgpu_sriov_vf(adev)) {
+ memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
+ }
+ }
return 0;
}
+void amdgpu_ucode_free_bo(struct amdgpu_device *adev)
+{
+ amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
+ &adev->firmware.fw_buf_mc,
+ &adev->firmware.fw_buf_ptr);
+}
+
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
- struct amdgpu_bo **bo = &adev->firmware.fw_buf;
- uint64_t fw_mc_addr;
- void *fw_buf_ptr = NULL;
uint64_t fw_offset = 0;
- int i, err;
+ int i;
struct amdgpu_firmware_info *ucode = NULL;
- const struct common_firmware_header *header = NULL;
-
- err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
- amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
- 0, NULL, NULL, bo);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
- goto failed;
- }
-
- err = amdgpu_bo_reserve(*bo, false);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
- goto failed_reserve;
- }
-
- err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
- &fw_mc_addr);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
- goto failed_pin;
- }
-
- err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
- goto failed_kmap;
- }
-
- amdgpu_bo_unreserve(*bo);
-
- memset(fw_buf_ptr, 0, adev->firmware.fw_size);
+ /* for baremetal, the ucode is allocated in gtt, so don't need to fill the bo when reset/suspend */
+ if (!amdgpu_sriov_vf(adev) && (amdgpu_in_reset(adev) || adev->in_suspend))
+ return 0;
/*
* if SMU loaded firmware, it needn't add SMC, UVD, and VCE
* ucode info here
@@ -391,52 +1160,357 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
}
+ if (amdgpu_virt_xgmi_migrate_enabled(adev) && adev->firmware.fw_buf)
+ adev->firmware.fw_buf_mc = amdgpu_bo_fb_aper_addr(adev->firmware.fw_buf);
+
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
- header = (const struct common_firmware_header *)ucode->fw->data;
- amdgpu_ucode_init_single_fw(adev, ucode, fw_mc_addr + fw_offset,
- (void *)((uint8_t *)fw_buf_ptr + fw_offset));
+ amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset,
+ adev->firmware.fw_buf_ptr + fw_offset);
if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
const struct gfx_firmware_header_v1_0 *cp_hdr;
+
cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
- amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
- fw_buf_ptr + fw_offset);
+ amdgpu_ucode_patch_jt(ucode, adev->firmware.fw_buf_mc + fw_offset,
+ adev->firmware.fw_buf_ptr + fw_offset);
fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
}
fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE);
}
}
return 0;
+}
-failed_kmap:
- amdgpu_bo_unpin(*bo);
-failed_pin:
- amdgpu_bo_unreserve(*bo);
-failed_reserve:
- amdgpu_bo_unref(bo);
-failed:
- if (err)
- adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT;
-
- return err;
+static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int block_type)
+{
+ if (block_type == MP0_HWIP) {
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ return "vega10";
+ case CHIP_VEGA12:
+ return "vega12";
+ default:
+ return NULL;
+ }
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ if (adev->asic_type == CHIP_RAVEN) {
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso";
+ return "raven";
+ }
+ break;
+ case IP_VERSION(11, 0, 0):
+ return "navi10";
+ case IP_VERSION(11, 0, 2):
+ return "vega20";
+ case IP_VERSION(11, 0, 3):
+ return "renoir";
+ case IP_VERSION(11, 0, 4):
+ return "arcturus";
+ case IP_VERSION(11, 0, 5):
+ return "navi14";
+ case IP_VERSION(11, 0, 7):
+ return "sienna_cichlid";
+ case IP_VERSION(11, 0, 9):
+ return "navi12";
+ case IP_VERSION(11, 0, 11):
+ return "navy_flounder";
+ case IP_VERSION(11, 0, 12):
+ return "dimgrey_cavefish";
+ case IP_VERSION(11, 0, 13):
+ return "beige_goby";
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 5, 2):
+ return "vangogh";
+ case IP_VERSION(12, 0, 1):
+ return "green_sardine";
+ case IP_VERSION(13, 0, 2):
+ return "aldebaran";
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
+ return "yellow_carp";
+ }
+ } else if (block_type == MP1_HWIP) {
+ switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ if (adev->asic_type == CHIP_ARCTURUS)
+ return "arcturus_smc";
+ return NULL;
+ case IP_VERSION(11, 0, 0):
+ return "navi10_smc";
+ case IP_VERSION(11, 0, 5):
+ return "navi14_smc";
+ case IP_VERSION(11, 0, 9):
+ return "navi12_smc";
+ case IP_VERSION(11, 0, 7):
+ return "sienna_cichlid_smc";
+ case IP_VERSION(11, 0, 11):
+ return "navy_flounder_smc";
+ case IP_VERSION(11, 0, 12):
+ return "dimgrey_cavefish_smc";
+ case IP_VERSION(11, 0, 13):
+ return "beige_goby_smc";
+ case IP_VERSION(13, 0, 2):
+ return "aldebaran_smc";
+ }
+ } else if (block_type == SDMA0_HWIP) {
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
+ case IP_VERSION(4, 0, 0):
+ return "vega10_sdma";
+ case IP_VERSION(4, 0, 1):
+ return "vega12_sdma";
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2_sdma";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso_sdma";
+ return "raven_sdma";
+ case IP_VERSION(4, 1, 2):
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ return "renoir_sdma";
+ return "green_sardine_sdma";
+ case IP_VERSION(4, 2, 0):
+ return "vega20_sdma";
+ case IP_VERSION(4, 2, 2):
+ return "arcturus_sdma";
+ case IP_VERSION(4, 4, 0):
+ return "aldebaran_sdma";
+ case IP_VERSION(5, 0, 0):
+ return "navi10_sdma";
+ case IP_VERSION(5, 0, 1):
+ return "cyan_skillfish2_sdma";
+ case IP_VERSION(5, 0, 2):
+ return "navi14_sdma";
+ case IP_VERSION(5, 0, 5):
+ return "navi12_sdma";
+ case IP_VERSION(5, 2, 0):
+ return "sienna_cichlid_sdma";
+ case IP_VERSION(5, 2, 2):
+ return "navy_flounder_sdma";
+ case IP_VERSION(5, 2, 4):
+ return "dimgrey_cavefish_sdma";
+ case IP_VERSION(5, 2, 5):
+ return "beige_goby_sdma";
+ case IP_VERSION(5, 2, 3):
+ return "yellow_carp_sdma";
+ case IP_VERSION(5, 2, 1):
+ return "vangogh_sdma";
+ }
+ } else if (block_type == UVD_HWIP) {
+ switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2_vcn";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso_vcn";
+ return "raven_vcn";
+ case IP_VERSION(2, 5, 0):
+ return "arcturus_vcn";
+ case IP_VERSION(2, 2, 0):
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ return "renoir_vcn";
+ return "green_sardine_vcn";
+ case IP_VERSION(2, 6, 0):
+ return "aldebaran_vcn";
+ case IP_VERSION(2, 0, 0):
+ return "navi10_vcn";
+ case IP_VERSION(2, 0, 2):
+ if (adev->asic_type == CHIP_NAVI12)
+ return "navi12_vcn";
+ return "navi14_vcn";
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 64):
+ case IP_VERSION(3, 0, 192):
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
+ IP_VERSION(10, 3, 0))
+ return "sienna_cichlid_vcn";
+ return "navy_flounder_vcn";
+ case IP_VERSION(3, 0, 2):
+ return "vangogh_vcn";
+ case IP_VERSION(3, 0, 16):
+ return "dimgrey_cavefish_vcn";
+ case IP_VERSION(3, 0, 33):
+ return "beige_goby_vcn";
+ case IP_VERSION(3, 1, 1):
+ return "yellow_carp_vcn";
+ }
+ } else if (block_type == GC_HWIP) {
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 0, 1):
+ return "vega10";
+ case IP_VERSION(9, 2, 1):
+ return "vega12";
+ case IP_VERSION(9, 4, 0):
+ return "vega20";
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso";
+ return "raven";
+ case IP_VERSION(9, 4, 1):
+ return "arcturus";
+ case IP_VERSION(9, 3, 0):
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ return "renoir";
+ return "green_sardine";
+ case IP_VERSION(9, 4, 2):
+ return "aldebaran";
+ case IP_VERSION(10, 1, 10):
+ return "navi10";
+ case IP_VERSION(10, 1, 1):
+ return "navi14";
+ case IP_VERSION(10, 1, 2):
+ return "navi12";
+ case IP_VERSION(10, 3, 0):
+ return "sienna_cichlid";
+ case IP_VERSION(10, 3, 2):
+ return "navy_flounder";
+ case IP_VERSION(10, 3, 1):
+ return "vangogh";
+ case IP_VERSION(10, 3, 4):
+ return "dimgrey_cavefish";
+ case IP_VERSION(10, 3, 5):
+ return "beige_goby";
+ case IP_VERSION(10, 3, 3):
+ return "yellow_carp";
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ return "cyan_skillfish2";
+ }
+ }
+ return NULL;
}
-int amdgpu_ucode_fini_bo(struct amdgpu_device *adev)
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev)
{
int i;
- struct amdgpu_firmware_info *ucode = NULL;
- for (i = 0; i < adev->firmware.max_ucodes; i++) {
- ucode = &adev->firmware.ucode[i];
- if (ucode->fw) {
- ucode->mc_addr = 0;
- ucode->kaddr = NULL;
- }
+ for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) {
+ if (adev->pdev->device == kicker_device_list[i].device &&
+ adev->pdev->revision == kicker_device_list[i].revision)
+ return true;
}
- amdgpu_bo_unref(&adev->firmware.fw_buf);
- adev->firmware.fw_buf = NULL;
- return 0;
+ return false;
+}
+
+void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
+{
+ int maj, min, rev;
+ char *ip_name;
+ const char *legacy;
+ uint32_t version = amdgpu_ip_version(adev, block_type, 0);
+
+ legacy = amdgpu_ucode_legacy_naming(adev, block_type);
+ if (legacy) {
+ snprintf(ucode_prefix, len, "%s", legacy);
+ return;
+ }
+
+ switch (block_type) {
+ case GC_HWIP:
+ ip_name = "gc";
+ break;
+ case SDMA0_HWIP:
+ ip_name = "sdma";
+ break;
+ case MP0_HWIP:
+ ip_name = "psp";
+ break;
+ case MP1_HWIP:
+ ip_name = "smu";
+ break;
+ case UVD_HWIP:
+ ip_name = "vcn";
+ break;
+ case VPE_HWIP:
+ ip_name = "vpe";
+ break;
+ case ISP_HWIP:
+ ip_name = "isp";
+ break;
+ default:
+ BUG();
+ }
+
+ maj = IP_VERSION_MAJ(version);
+ min = IP_VERSION_MIN(version);
+ rev = IP_VERSION_REV(version);
+
+ snprintf(ucode_prefix, len, "%s_%d_%d_%d", ip_name, maj, min, rev);
+}
+
+/*
+ * amdgpu_ucode_request - Fetch and validate amdgpu microcode
+ *
+ * @adev: amdgpu device
+ * @fw: pointer to load firmware to
+ * @required: whether the firmware is required
+ * @fmt: firmware name format string
+ * @...: variable arguments
+ *
+ * This is a helper that will use request_firmware and amdgpu_ucode_validate
+ * to load and run basic validation on firmware. If the load fails, remap
+ * the error code to -ENODEV, so that early_init functions will fail to load.
+ */
+int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
+ enum amdgpu_ucode_required required, const char *fmt, ...)
+{
+ char fname[AMDGPU_UCODE_NAME_MAX];
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = vsnprintf(fname, sizeof(fname), fmt, ap);
+ va_end(ap);
+ if (r == sizeof(fname)) {
+ dev_warn(adev->dev, "amdgpu firmware name buffer overflow\n");
+ return -EOVERFLOW;
+ }
+
+ if (required == AMDGPU_UCODE_REQUIRED)
+ r = request_firmware(fw, fname, adev->dev);
+ else {
+ r = firmware_request_nowarn(fw, fname, adev->dev);
+ if (r)
+ drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fname);
+ }
+ if (r)
+ return -ENODEV;
+
+ r = amdgpu_ucode_validate(*fw);
+ if (r)
+ /*
+ * The amdgpu_ucode_request() should be paired with amdgpu_ucode_release()
+ * regardless of success/failure, and the amdgpu_ucode_release() takes care of
+ * firmware release and need to avoid redundant release FW operation here.
+ */
+ dev_dbg(adev->dev, "\"%s\" failed to validate\n", fname);
+
+ return r;
+}
+
+/*
+ * amdgpu_ucode_release - Release firmware microcode
+ *
+ * @fw: pointer to firmware to release
+ */
+void amdgpu_ucode_release(const struct firmware **fw)
+{
+ release_firmware(*fw);
+ *fw = NULL;
}
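
To tie the pieces together, here is a minimal sketch of the intended calling pattern (an assumed example, not part of this patch): an IP block resolves its ucode prefix with amdgpu_ucode_ip_version_decode(), fetches the binary with amdgpu_ucode_request(), and pairs it with amdgpu_ucode_release() even on the failure path. The function name and firmware file name below are illustrative:

	static int example_init_microcode(struct amdgpu_device *adev)
	{
		char ucode_prefix[30];
		int r;

		/* yields e.g. "gc_10_3_0", or a legacy name such as "sienna_cichlid" */
		amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix,
					       sizeof(ucode_prefix));

		r = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, AMDGPU_UCODE_REQUIRED,
					 "amdgpu/%s_pfp.bin", ucode_prefix);
		if (r)
			goto out;

		/* ... parse the firmware header, fill adev->firmware.ucode[] ... */
		return 0;

	out:
		/* release is safe on the failure path too, per the pairing rule above */
		amdgpu_ucode_release(&adev->gfx.pfp_fw);
		return r;
	}
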
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 758f03a1770d..6349aad6da35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -23,6 +23,10 @@
#ifndef __AMDGPU_UCODE_H__
#define __AMDGPU_UCODE_H__
+#include "amdgpu_socbb.h"
+
+#define RS64_FW_UC_START_ADDR_LO 0x3000
+
struct common_firmware_header {
uint32_t size_bytes; /* size of the entire header+image(s) in bytes */
uint32_t header_size_bytes; /* size of just the header in bytes */
@@ -49,12 +53,128 @@ struct smc_firmware_header_v1_0 {
uint32_t ucode_start_addr;
};
+/* version_major=2, version_minor=0 */
+struct smc_firmware_header_v2_0 {
+ struct smc_firmware_header_v1_0 v1_0;
+ uint32_t ppt_offset_bytes; /* soft pptable offset */
+ uint32_t ppt_size_bytes; /* soft pptable size */
+};
+
+struct smc_soft_pptable_entry {
+ uint32_t id;
+ uint32_t ppt_offset_bytes;
+ uint32_t ppt_size_bytes;
+};
+
+/* version_major=2, version_minor=1 */
+struct smc_firmware_header_v2_1 {
+ struct smc_firmware_header_v1_0 v1_0;
+ uint32_t pptable_count;
+ uint32_t pptable_entry_offset;
+};
+
+struct psp_fw_legacy_bin_desc {
+ uint32_t fw_version;
+ uint32_t offset_bytes;
+ uint32_t size_bytes;
+};
+
/* version_major=1, version_minor=0 */
struct psp_firmware_header_v1_0 {
struct common_firmware_header header;
- uint32_t ucode_feature_version;
- uint32_t sos_offset_bytes;
- uint32_t sos_size_bytes;
+ struct psp_fw_legacy_bin_desc sos;
+};
+
+/* version_major=1, version_minor=1 */
+struct psp_firmware_header_v1_1 {
+ struct psp_firmware_header_v1_0 v1_0;
+ struct psp_fw_legacy_bin_desc toc;
+ struct psp_fw_legacy_bin_desc kdb;
+};
+
+/* version_major=1, version_minor=2 */
+struct psp_firmware_header_v1_2 {
+ struct psp_firmware_header_v1_0 v1_0;
+ struct psp_fw_legacy_bin_desc res;
+ struct psp_fw_legacy_bin_desc kdb;
+};
+
+/* version_major=1, version_minor=3 */
+struct psp_firmware_header_v1_3 {
+ struct psp_firmware_header_v1_1 v1_1;
+ struct psp_fw_legacy_bin_desc spl;
+ struct psp_fw_legacy_bin_desc rl;
+ struct psp_fw_legacy_bin_desc sys_drv_aux;
+ struct psp_fw_legacy_bin_desc sos_aux;
+};
+
+struct psp_fw_bin_desc {
+ uint32_t fw_type;
+ uint32_t fw_version;
+ uint32_t offset_bytes;
+ uint32_t size_bytes;
+};
+
+enum psp_fw_type {
+ PSP_FW_TYPE_UNKOWN,
+ PSP_FW_TYPE_PSP_SOS,
+ PSP_FW_TYPE_PSP_SYS_DRV,
+ PSP_FW_TYPE_PSP_KDB,
+ PSP_FW_TYPE_PSP_TOC,
+ PSP_FW_TYPE_PSP_SPL,
+ PSP_FW_TYPE_PSP_RL,
+ PSP_FW_TYPE_PSP_SOC_DRV,
+ PSP_FW_TYPE_PSP_INTF_DRV,
+ PSP_FW_TYPE_PSP_DBG_DRV,
+ PSP_FW_TYPE_PSP_RAS_DRV,
+ PSP_FW_TYPE_PSP_IPKEYMGR_DRV,
+ PSP_FW_TYPE_PSP_SPDM_DRV,
+ PSP_FW_TYPE_MAX_INDEX,
+};
+
+/* version_major=2, version_minor=0 */
+struct psp_firmware_header_v2_0 {
+ struct common_firmware_header header;
+ uint32_t psp_fw_bin_count;
+ struct psp_fw_bin_desc psp_fw_bin[];
+};
+
+/* version_major=2, version_minor=1 */
+struct psp_firmware_header_v2_1 {
+ struct common_firmware_header header;
+ uint32_t psp_fw_bin_count;
+ uint32_t psp_aux_fw_bin_index;
+ struct psp_fw_bin_desc psp_fw_bin[];
+};
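
The v2_x PSP headers describe a variable number of embedded binaries; a parsing sketch, assuming fw points at a validated firmware image and that only the SOS entry is of interest here:

/* Sketch: walk the flexible psp_fw_bin[] array of a v2_0 header. */
const struct psp_firmware_header_v2_0 *hdr =
	(const struct psp_firmware_header_v2_0 *)fw->data;
uint32_t i;

for (i = 0; i < le32_to_cpu(hdr->psp_fw_bin_count); i++) {
	const struct psp_fw_bin_desc *desc = &hdr->psp_fw_bin[i];

	if (le32_to_cpu(desc->fw_type) == PSP_FW_TYPE_PSP_SOS) {
		/* the SOS image starts at desc->offset_bytes and is
		 * desc->size_bytes long */
	}
}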
+
+/* version_major=1, version_minor=0 */
+struct ta_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ struct psp_fw_legacy_bin_desc xgmi;
+ struct psp_fw_legacy_bin_desc ras;
+ struct psp_fw_legacy_bin_desc hdcp;
+ struct psp_fw_legacy_bin_desc dtm;
+ struct psp_fw_legacy_bin_desc securedisplay;
+};
+
+enum ta_fw_type {
+ TA_FW_TYPE_UNKOWN,
+ TA_FW_TYPE_PSP_ASD,
+ TA_FW_TYPE_PSP_XGMI,
+ TA_FW_TYPE_PSP_RAS,
+ TA_FW_TYPE_PSP_HDCP,
+ TA_FW_TYPE_PSP_DTM,
+ TA_FW_TYPE_PSP_RAP,
+ TA_FW_TYPE_PSP_SECUREDISPLAY,
+ TA_FW_TYPE_PSP_XGMI_AUX,
+ TA_FW_TYPE_MAX_INDEX,
+};
+
+/* version_major=2, version_minor=0 */
+struct ta_firmware_header_v2_0 {
+ struct common_firmware_header header;
+ uint32_t ta_fw_bin_count;
+ struct psp_fw_bin_desc ta_fw_bin[];
};
/* version_major=1, version_minor=0 */
@@ -65,6 +185,33 @@ struct gfx_firmware_header_v1_0 {
uint32_t jt_size; /* size of jt */
};
+/* version_major=2, version_minor=0 */
+struct gfx_firmware_header_v2_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t ucode_size_bytes;
+ uint32_t ucode_offset_bytes;
+ uint32_t data_size_bytes;
+ uint32_t data_offset_bytes;
+ uint32_t ucode_start_addr_lo;
+ uint32_t ucode_start_addr_hi;
+};
+
+/* version_major=1, version_minor=0 */
+struct mes_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t mes_ucode_version;
+ uint32_t mes_ucode_size_bytes;
+ uint32_t mes_ucode_offset_bytes;
+ uint32_t mes_ucode_data_version;
+ uint32_t mes_ucode_data_size_bytes;
+ uint32_t mes_ucode_data_offset_bytes;
+ uint32_t mes_uc_start_addr_lo;
+ uint32_t mes_uc_start_addr_hi;
+ uint32_t mes_data_start_addr_lo;
+ uint32_t mes_data_start_addr_hi;
+};
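
Several of these headers split 64-bit addresses into _lo/_hi words; a one-line recombination sketch, assuming mes_hdr points at a mes_firmware_header_v1_0 (the same pattern appears in the umsch_mm code later in this series):

/* Sketch: rebuild the 64-bit microcode start address from the header. */
uint64_t uc_start_addr =
	le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
	((uint64_t)le32_to_cpu(mes_hdr->mes_uc_start_addr_hi) << 32);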
+
/* version_major=1, version_minor=0 */
struct rlc_firmware_header_v1_0 {
struct common_firmware_header header;
@@ -98,6 +245,61 @@ struct rlc_firmware_header_v2_0 {
uint32_t reg_list_separate_array_offset_bytes; /* payload offset from the start of the header */
};
+/* version_major=2, version_minor=1 */
+struct rlc_firmware_header_v2_1 {
+ struct rlc_firmware_header_v2_0 v2_0;
+ uint32_t reg_list_format_direct_reg_list_length; /* length of direct reg list format array */
+ uint32_t save_restore_list_cntl_ucode_ver;
+ uint32_t save_restore_list_cntl_feature_ver;
+ uint32_t save_restore_list_cntl_size_bytes;
+ uint32_t save_restore_list_cntl_offset_bytes;
+ uint32_t save_restore_list_gpm_ucode_ver;
+ uint32_t save_restore_list_gpm_feature_ver;
+ uint32_t save_restore_list_gpm_size_bytes;
+ uint32_t save_restore_list_gpm_offset_bytes;
+ uint32_t save_restore_list_srm_ucode_ver;
+ uint32_t save_restore_list_srm_feature_ver;
+ uint32_t save_restore_list_srm_size_bytes;
+ uint32_t save_restore_list_srm_offset_bytes;
+};
+
+/* version_major=2, version_minor=2 */
+struct rlc_firmware_header_v2_2 {
+ struct rlc_firmware_header_v2_1 v2_1;
+ uint32_t rlc_iram_ucode_size_bytes;
+ uint32_t rlc_iram_ucode_offset_bytes;
+ uint32_t rlc_dram_ucode_size_bytes;
+ uint32_t rlc_dram_ucode_offset_bytes;
+};
+
+/* version_major=2, version_minor=3 */
+struct rlc_firmware_header_v2_3 {
+ struct rlc_firmware_header_v2_2 v2_2;
+ uint32_t rlcp_ucode_version;
+ uint32_t rlcp_ucode_feature_version;
+ uint32_t rlcp_ucode_size_bytes;
+ uint32_t rlcp_ucode_offset_bytes;
+ uint32_t rlcv_ucode_version;
+ uint32_t rlcv_ucode_feature_version;
+ uint32_t rlcv_ucode_size_bytes;
+ uint32_t rlcv_ucode_offset_bytes;
+};
+
+/* version_major=2, version_minor=4 */
+struct rlc_firmware_header_v2_4 {
+ struct rlc_firmware_header_v2_3 v2_3;
+ uint32_t global_tap_delays_ucode_size_bytes;
+ uint32_t global_tap_delays_ucode_offset_bytes;
+ uint32_t se0_tap_delays_ucode_size_bytes;
+ uint32_t se0_tap_delays_ucode_offset_bytes;
+ uint32_t se1_tap_delays_ucode_size_bytes;
+ uint32_t se1_tap_delays_ucode_offset_bytes;
+ uint32_t se2_tap_delays_ucode_size_bytes;
+ uint32_t se2_tap_delays_ucode_offset_bytes;
+ uint32_t se3_tap_delays_ucode_size_bytes;
+ uint32_t se3_tap_delays_ucode_offset_bytes;
+};
+
/* version_major=1, version_minor=0 */
struct sdma_firmware_header_v1_0 {
struct common_firmware_header header;
@@ -113,38 +315,228 @@ struct sdma_firmware_header_v1_1 {
uint32_t digest_size;
};
+/* version_major=2, version_minor=0 */
+struct sdma_firmware_header_v2_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t ctx_ucode_size_bytes; /* context thread ucode size */
+ uint32_t ctx_jt_offset; /* context thread jt location */
+ uint32_t ctx_jt_size; /* context thread size of jt */
+ uint32_t ctl_ucode_offset;
+ uint32_t ctl_ucode_size_bytes; /* control thread ucode size */
+ uint32_t ctl_jt_offset; /* control thread jt location */
+ uint32_t ctl_jt_size; /* control thread size of jt */
+};
+
+/* version_major=1, version_minor=0 */
+struct vpe_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t ctx_ucode_size_bytes; /* context thread ucode size */
+ uint32_t ctx_jt_offset; /* context thread jt location */
+ uint32_t ctx_jt_size; /* context thread size of jt */
+ uint32_t ctl_ucode_offset;
+ uint32_t ctl_ucode_size_bytes; /* control thread ucode size */
+ uint32_t ctl_jt_offset; /* control thread jt location */
+ uint32_t ctl_jt_size; /* control thread size of jt */
+};
+
+/* version_major=1, version_minor=0 */
+struct umsch_mm_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t umsch_mm_ucode_version;
+ uint32_t umsch_mm_ucode_size_bytes;
+ uint32_t umsch_mm_ucode_offset_bytes;
+ uint32_t umsch_mm_ucode_data_version;
+ uint32_t umsch_mm_ucode_data_size_bytes;
+ uint32_t umsch_mm_ucode_data_offset_bytes;
+ uint32_t umsch_mm_irq_start_addr_lo;
+ uint32_t umsch_mm_irq_start_addr_hi;
+ uint32_t umsch_mm_uc_start_addr_lo;
+ uint32_t umsch_mm_uc_start_addr_hi;
+ uint32_t umsch_mm_data_start_addr_lo;
+ uint32_t umsch_mm_data_start_addr_hi;
+};
+
+/* version_major=3, version_minor=0 */
+struct sdma_firmware_header_v3_0 {
+ struct common_firmware_header header;
+ uint32_t ucode_feature_version;
+ uint32_t ucode_offset_bytes;
+ uint32_t ucode_size_bytes;
+};
+
+/* gpu info payload */
+struct gpu_info_firmware_v1_0 {
+ uint32_t gc_num_se;
+ uint32_t gc_num_cu_per_sh;
+ uint32_t gc_num_sh_per_se;
+ uint32_t gc_num_rb_per_se;
+ uint32_t gc_num_tccs;
+ uint32_t gc_num_gprs;
+ uint32_t gc_num_max_gs_thds;
+ uint32_t gc_gs_table_depth;
+ uint32_t gc_gsprim_buff_depth;
+ uint32_t gc_parameter_cache_depth;
+ uint32_t gc_double_offchip_lds_buffer;
+ uint32_t gc_wave_size;
+ uint32_t gc_max_waves_per_simd;
+ uint32_t gc_max_scratch_slots_per_cu;
+ uint32_t gc_lds_size;
+};
+
+struct gpu_info_firmware_v1_1 {
+ struct gpu_info_firmware_v1_0 v1_0;
+ uint32_t num_sc_per_sh;
+ uint32_t num_packer_per_sc;
+};
+
+/* gpu info payload
+ * version_major=1, version_minor=2 */
+struct gpu_info_firmware_v1_2 {
+ struct gpu_info_firmware_v1_1 v1_1;
+ struct gpu_info_soc_bounding_box_v1_0 soc_bounding_box;
+};
+
+/* version_major=1, version_minor=0 */
+struct gpu_info_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint16_t version_major; /* version */
+ uint16_t version_minor; /* version */
+};
+
+/* version_major=1, version_minor=0 */
+struct dmcu_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t intv_offset_bytes; /* interrupt vectors offset from end of header, in bytes */
+ uint32_t intv_size_bytes; /* size of interrupt vectors, in bytes */
+};
+
+/* version_major=1, version_minor=0 */
+struct dmcub_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t inst_const_bytes; /* size of instruction region, in bytes */
+ uint32_t bss_data_bytes; /* size of bss/data region, in bytes */
+};
+
+/* version_major=1, version_minor=0 */
+struct imu_firmware_header_v1_0 {
+ struct common_firmware_header header;
+ uint32_t imu_iram_ucode_size_bytes;
+ uint32_t imu_iram_ucode_offset_bytes;
+ uint32_t imu_dram_ucode_size_bytes;
+ uint32_t imu_dram_ucode_offset_bytes;
+};
+
/* header is fixed size */
union amdgpu_firmware_header {
struct common_firmware_header common;
struct mc_firmware_header_v1_0 mc;
struct smc_firmware_header_v1_0 smc;
+ struct smc_firmware_header_v2_0 smc_v2_0;
struct psp_firmware_header_v1_0 psp;
+ struct psp_firmware_header_v1_1 psp_v1_1;
+ struct psp_firmware_header_v1_3 psp_v1_3;
+ struct psp_firmware_header_v2_0 psp_v2_0;
+ struct psp_firmware_header_v2_0 psp_v2_1;
+ struct ta_firmware_header_v1_0 ta;
+ struct ta_firmware_header_v2_0 ta_v2_0;
struct gfx_firmware_header_v1_0 gfx;
+ struct gfx_firmware_header_v2_0 gfx_v2_0;
struct rlc_firmware_header_v1_0 rlc;
struct rlc_firmware_header_v2_0 rlc_v2_0;
+ struct rlc_firmware_header_v2_1 rlc_v2_1;
+ struct rlc_firmware_header_v2_2 rlc_v2_2;
+ struct rlc_firmware_header_v2_3 rlc_v2_3;
+ struct rlc_firmware_header_v2_4 rlc_v2_4;
struct sdma_firmware_header_v1_0 sdma;
struct sdma_firmware_header_v1_1 sdma_v1_1;
+ struct sdma_firmware_header_v2_0 sdma_v2_0;
+ struct sdma_firmware_header_v3_0 sdma_v3_0;
+ struct gpu_info_firmware_header_v1_0 gpu_info;
+ struct dmcu_firmware_header_v1_0 dmcu;
+ struct dmcub_firmware_header_v1_0 dmcub;
+ struct imu_firmware_header_v1_0 imu;
uint8_t raw[0x100];
};
+#define UCODE_MAX_PSP_PACKAGING (((sizeof(union amdgpu_firmware_header) - sizeof(struct common_firmware_header) - 4) / sizeof(struct psp_fw_bin_desc)) * 2)
+
/*
* fw loading support
*/
enum AMDGPU_UCODE_ID {
- AMDGPU_UCODE_ID_SDMA0 = 0,
+ AMDGPU_UCODE_ID_CAP = 0,
+ AMDGPU_UCODE_ID_SDMA0,
AMDGPU_UCODE_ID_SDMA1,
+ AMDGPU_UCODE_ID_SDMA2,
+ AMDGPU_UCODE_ID_SDMA3,
+ AMDGPU_UCODE_ID_SDMA4,
+ AMDGPU_UCODE_ID_SDMA5,
+ AMDGPU_UCODE_ID_SDMA6,
+ AMDGPU_UCODE_ID_SDMA7,
+ AMDGPU_UCODE_ID_SDMA_UCODE_TH0,
+ AMDGPU_UCODE_ID_SDMA_UCODE_TH1,
+ AMDGPU_UCODE_ID_SDMA_RS64,
AMDGPU_UCODE_ID_CP_CE,
AMDGPU_UCODE_ID_CP_PFP,
AMDGPU_UCODE_ID_CP_ME,
+ AMDGPU_UCODE_ID_CP_RS64_PFP,
+ AMDGPU_UCODE_ID_CP_RS64_ME,
+ AMDGPU_UCODE_ID_CP_RS64_MEC,
+ AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK,
+ AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK,
+ AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK,
+ AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK,
+ AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK,
+ AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK,
+ AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK,
+ AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK,
AMDGPU_UCODE_ID_CP_MEC1,
AMDGPU_UCODE_ID_CP_MEC1_JT,
AMDGPU_UCODE_ID_CP_MEC2,
AMDGPU_UCODE_ID_CP_MEC2_JT,
+ AMDGPU_UCODE_ID_CP_MES,
+ AMDGPU_UCODE_ID_CP_MES_DATA,
+ AMDGPU_UCODE_ID_CP_MES1,
+ AMDGPU_UCODE_ID_CP_MES1_DATA,
+ AMDGPU_UCODE_ID_IMU_I,
+ AMDGPU_UCODE_ID_IMU_D,
+ AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS,
+ AMDGPU_UCODE_ID_SE0_TAP_DELAYS,
+ AMDGPU_UCODE_ID_SE1_TAP_DELAYS,
+ AMDGPU_UCODE_ID_SE2_TAP_DELAYS,
+ AMDGPU_UCODE_ID_SE3_TAP_DELAYS,
+ AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL,
+ AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM,
+ AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM,
+ AMDGPU_UCODE_ID_RLC_IRAM,
+ AMDGPU_UCODE_ID_RLC_DRAM,
+ AMDGPU_UCODE_ID_RLC_P,
+ AMDGPU_UCODE_ID_RLC_V,
AMDGPU_UCODE_ID_RLC_G,
AMDGPU_UCODE_ID_STORAGE,
AMDGPU_UCODE_ID_SMC,
+ AMDGPU_UCODE_ID_PPTABLE,
AMDGPU_UCODE_ID_UVD,
+ AMDGPU_UCODE_ID_UVD1,
AMDGPU_UCODE_ID_VCE,
+ AMDGPU_UCODE_ID_VCN,
+ AMDGPU_UCODE_ID_VCN1,
+ AMDGPU_UCODE_ID_DMCU_ERAM,
+ AMDGPU_UCODE_ID_DMCU_INTV,
+ AMDGPU_UCODE_ID_VCN0_RAM,
+ AMDGPU_UCODE_ID_VCN1_RAM,
+ AMDGPU_UCODE_ID_DMCUB,
+ AMDGPU_UCODE_ID_VPE_CTX,
+ AMDGPU_UCODE_ID_VPE_CTL,
+ AMDGPU_UCODE_ID_VPE,
+ AMDGPU_UCODE_ID_UMSCH_MM_UCODE,
+ AMDGPU_UCODE_ID_UMSCH_MM_DATA,
+ AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER,
+ AMDGPU_UCODE_ID_P2S_TABLE,
+ AMDGPU_UCODE_ID_JPEG_RAM,
+ AMDGPU_UCODE_ID_ISP,
AMDGPU_UCODE_ID_MAXIMUM,
};
@@ -155,6 +547,18 @@ enum AMDGPU_UCODE_STATUS {
AMDGPU_UCODE_STATUS_LOADED,
};
+enum amdgpu_firmware_load_type {
+ AMDGPU_FW_LOAD_DIRECT = 0,
+ AMDGPU_FW_LOAD_PSP,
+ AMDGPU_FW_LOAD_SMU,
+ AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO,
+};
+
+enum amdgpu_ucode_required {
+ AMDGPU_UCODE_OPTIONAL,
+ AMDGPU_UCODE_REQUIRED,
+};
+
/* conform to smu_ucode_xfer_cz.h */
#define AMDGPU_SDMA0_UCODE_LOADED 0x00000001
#define AMDGPU_SDMA1_UCODE_LOADED 0x00000002
@@ -177,20 +581,62 @@ struct amdgpu_firmware_info {
void *kaddr;
/* ucode_size_bytes */
uint32_t ucode_size;
+ /* starting tmr mc address */
+ uint32_t tmr_mc_addr_lo;
+ uint32_t tmr_mc_addr_hi;
+};
+
+struct amdgpu_firmware {
+ struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
+ enum amdgpu_firmware_load_type load_type;
+ struct amdgpu_bo *fw_buf;
+ unsigned int fw_size;
+ unsigned int max_ucodes;
+ /* firmware is loaded by psp instead of smu from vega10 onwards */
+ const struct amdgpu_psp_funcs *funcs;
+ struct amdgpu_bo *rbuf;
+ struct mutex mutex;
+
+ /* gpu info firmware data pointer */
+ const struct firmware *gpu_info_fw;
+
+ void *fw_buf_ptr;
+ uint64_t fw_buf_mc;
+ uint32_t pldm_version;
+};
+
+struct kicker_device {
+ unsigned short device;
+ u8 revision;
};
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
+void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
-int amdgpu_ucode_validate(const struct firmware *fw);
+void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr);
+void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
+__printf(4, 5)
+int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
+ enum amdgpu_ucode_required required, const char *fmt, ...);
+void amdgpu_ucode_release(const struct firmware **fw);
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
uint16_t hdr_major, uint16_t hdr_minor);
+
int amdgpu_ucode_init_bo(struct amdgpu_device *adev);
-int amdgpu_ucode_fini_bo(struct amdgpu_device *adev);
+int amdgpu_ucode_create_bo(struct amdgpu_device *adev);
+int amdgpu_ucode_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_ucode_free_bo(struct amdgpu_device *adev);
+void amdgpu_ucode_sysfs_fini(struct amdgpu_device *adev);
enum amdgpu_firmware_load_type
amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type);
+const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id);
+
+void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len);
+bool amdgpu_is_kicker_fw(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
new file mode 100644
index 000000000000..2e039fb778ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -0,0 +1,588 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/sort.h>
+#include "amdgpu.h"
+#include "umc_v6_7.h"
+#define MAX_UMC_POISON_POLLING_TIME_SYNC 20 //ms
+
+#define MAX_UMC_HASH_STRING_SIZE 256
+
+static int amdgpu_umc_convert_error_address(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t err_addr,
+ uint32_t ch_inst, uint32_t umc_inst)
+{
+ switch (amdgpu_ip_version(adev, UMC_HWIP, 0)) {
+ case IP_VERSION(6, 7, 0):
+ umc_v6_7_convert_error_address(adev,
+ err_data, err_addr, ch_inst, umc_inst);
+ break;
+ default:
+ dev_warn(adev->dev,
+ "UMC address to Physical address translation is not supported\n");
+ return AMDGPU_RAS_FAIL;
+ }
+
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst)
+{
+ struct ras_err_data err_data;
+ int ret;
+
+ ret = amdgpu_ras_error_data_init(&err_data);
+ if (ret)
+ return ret;
+
+ err_data.err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev,
+ "Failed to alloc memory for umc error record in MCA notifier!\n");
+ ret = AMDGPU_RAS_FAIL;
+ goto out_fini_err_data;
+ }
+
+ err_data.err_addr_len = adev->umc.max_ras_err_cnt_per_query;
+
+ /*
+ * Translate UMC channel address to Physical address
+ */
+ ret = amdgpu_umc_convert_error_address(adev, &err_data, err_addr,
+ ch_inst, umc_inst);
+ if (ret)
+ goto out_free_err_addr;
+
+ if (amdgpu_bad_page_threshold != 0) {
+ amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
+ err_data.err_addr_cnt, false);
+ amdgpu_ras_save_bad_pages(adev, NULL);
+ }
+
+out_free_err_addr:
+ kfree(err_data.err_addr);
+
+out_fini_err_data:
+ amdgpu_ras_error_data_fini(&err_data);
+
+ return ret;
+}
+
+void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ unsigned int error_query_mode;
+ int ret = 0;
+ unsigned long err_count;
+
+ amdgpu_ras_get_error_query_mode(adev, &error_query_mode);
+
+ mutex_lock(&con->page_retirement_lock);
+ ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc));
+ if (ret == -EOPNOTSUPP &&
+ error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status);
+
+ if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+ * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev, "Failed to alloc memory for "
+ "umc error address record!\n");
+ else
+ err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status);
+ }
+ } else if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY ||
+ (!ret && error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY)) {
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_count)
+ adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status);
+
+ if (adev->umc.ras &&
+ adev->umc.ras->ecc_info_query_ras_error_address &&
+ adev->umc.max_ras_err_cnt_per_query) {
+ err_data->err_addr =
+ kcalloc(adev->umc.max_ras_err_cnt_per_query,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+
+ /* still call query_ras_error_address to clear error status
+ * even if a NOMEM error is encountered
+ */
+ if (!err_data->err_addr)
+ dev_warn(adev->dev, "Failed to alloc memory for "
+ "umc error address record!\n");
+ else
+ err_data->err_addr_len = adev->umc.max_ras_err_cnt_per_query;
+
+ /* umc query_ras_error_address is also responsible for clearing
+ * error status
+ */
+ adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status);
+ }
+ }
+
+ /* only uncorrectable error needs gpu reset */
+ if (err_data->ue_count || err_data->de_count) {
+ err_count = err_data->ue_count + err_data->de_count;
+ if ((amdgpu_bad_page_threshold != 0) &&
+ err_data->err_addr_cnt) {
+ amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
+ err_data->err_addr_cnt, false);
+ amdgpu_ras_save_bad_pages(adev, &err_count);
+
+ amdgpu_dpm_send_hbm_bad_pages_num(adev,
+ con->eeprom_control.ras_num_bad_pages);
+
+ if (con->update_channel_flag == true) {
+ amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
+ con->update_channel_flag = false;
+ }
+ }
+ }
+
+ kfree(err_data->err_addr);
+ err_data->err_addr = NULL;
+
+ mutex_unlock(&con->page_retirement_lock);
+}
+
+static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry,
+ uint32_t reset)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_umc_handle_bad_pages(adev, ras_error_status);
+
+ if ((err_data->ue_count || err_data->de_count) &&
+ (reset || amdgpu_ras_is_rma(adev))) {
+ con->gpu_reset_flags |= reset;
+ amdgpu_ras_reset_gpu(adev);
+ }
+
+ return AMDGPU_RAS_SUCCESS;
+}
+
+int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset)
+{
+ int ret = AMDGPU_RAS_SUCCESS;
+
+ if (adev->gmc.xgmi.connected_to_cpu ||
+ adev->gmc.is_app_apu) {
+ if (reset) {
+ /* MCA poison handler is only responsible for GPU reset,
+ * let MCA notifier do page retirement.
+ */
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_reset_gpu(adev);
+ }
+ return ret;
+ }
+
+ if (!amdgpu_sriov_vf(adev)) {
+ if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) {
+ struct ras_err_data err_data;
+ struct ras_common_if head = {
+ .block = AMDGPU_RAS_BLOCK__UMC,
+ };
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+
+ ret = amdgpu_ras_error_data_init(&err_data);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
+
+ if (ret == AMDGPU_RAS_SUCCESS && obj) {
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+ obj->err_data.de_count += err_data.de_count;
+ }
+
+ amdgpu_ras_error_data_fini(&err_data);
+ } else {
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret;
+
+ ret = amdgpu_ras_put_poison_req(adev,
+ block, pasid, pasid_fn, data, reset);
+ if (!ret) {
+ atomic_inc(&con->page_retirement_req_cnt);
+ atomic_inc(&con->poison_consumption_count);
+ wake_up(&con->page_retirement_wq);
+ }
+ }
+ } else {
+ if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
+ adev->virt.ops->ras_poison_handler(adev, block);
+ else
+ dev_warn(adev->dev,
+ "No ras_poison_handler interface in SRIOV!\n");
+ }
+
+ return ret;
+}
+
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint32_t reset)
+{
+ return amdgpu_umc_pasid_poison_handler(adev,
+ block, 0, NULL, NULL, reset);
+}
+
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry)
+{
+ return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry,
+ AMDGPU_RAS_GPU_RESET_MODE1_RESET);
+}
+
+int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_umc_ras *ras;
+
+ if (!adev->umc.ras)
+ return 0;
+
+ ras = adev->umc.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register umc ras block!\n");
+ return err;
+ }
+
+ strcpy(adev->umc.ras->ras_block.ras_comm.name, "umc");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__UMC;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->umc.ras_if = &ras->ras_block.ras_comm;
+
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init;
+
+ if (!ras->ras_block.ras_cb)
+ ras->ras_block.ras_cb = amdgpu_umc_process_ras_data_cb;
+
+ return 0;
+}
+
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ if (amdgpu_sriov_vf(adev))
+ return r;
+
+ if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+ r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+
+ /* ras init of specific umc version */
+ if (adev->umc.ras &&
+ adev->umc.ras->err_cnt_init)
+ adev->umc.ras->err_cnt_init(adev);
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+ return r;
+}
+
+int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->umc.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
+
+int amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
+ uint64_t err_addr,
+ uint64_t retired_page,
+ uint32_t channel_index,
+ uint32_t umc_inst)
+{
+ struct eeprom_table_record *err_rec;
+
+ if (!err_data ||
+ !err_data->err_addr ||
+ (err_data->err_addr_cnt >= err_data->err_addr_len))
+ return -EINVAL;
+
+ err_rec = &err_data->err_addr[err_data->err_addr_cnt];
+
+ err_rec->address = err_addr;
+ /* page frame address is saved */
+ err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec->ts = (uint64_t)ktime_get_real_seconds();
+ err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec->cu = 0;
+ err_rec->mem_channel = channel_index;
+ err_rec->mcumc_id = umc_inst;
+
+ err_data->err_addr_cnt++;
+
+ return 0;
+}
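
A hedged sketch of a typical caller: an IP-specific address-conversion routine recording one retired page, where soc_pa, channel_index and umc_inst are assumed to have been computed beforehand:

/* Sketch only: append one bad-page record if there is room left. */
if (amdgpu_umc_fill_error_record(err_data, err_addr,
				 soc_pa, channel_index, umc_inst))
	dev_warn(adev->dev, "no room left for another bad page record\n");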
+
+static int amdgpu_umc_loop_all_aid(struct amdgpu_device *adev, umc_func func,
+ void *data)
+{
+ uint32_t umc_node_inst;
+ uint32_t node_inst;
+ uint32_t umc_inst;
+ uint32_t ch_inst;
+ int ret;
+
+ /*
+ * This loop is based on the following:
+ * umc.active_mask = mask of active umc instances across all nodes
+ * umc.umc_inst_num = maximum number of umc instances per node
+ * umc.node_inst_num = maximum number of node instances
+ * Channel instances are not assumed to be harvested.
+ */
+ dev_dbg(adev->dev, "active umcs :%lx umc_inst per node: %d",
+ adev->umc.active_mask, adev->umc.umc_inst_num);
+ for_each_set_bit(umc_node_inst, &(adev->umc.active_mask),
+ adev->umc.node_inst_num * adev->umc.umc_inst_num) {
+ node_inst = umc_node_inst / adev->umc.umc_inst_num;
+ umc_inst = umc_node_inst % adev->umc.umc_inst_num;
+ LOOP_UMC_CH_INST(ch_inst) {
+ dev_dbg(adev->dev,
+ "node_inst :%d umc_inst: %d ch_inst: %d",
+ node_inst, umc_inst, ch_inst);
+ ret = func(adev, node_inst, umc_inst, ch_inst, data);
+ if (ret) {
+ dev_err(adev->dev,
+ "Node %d umc %d ch %d func returns %d\n",
+ node_inst, umc_inst, ch_inst, ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
+ umc_func func, void *data)
+{
+ uint32_t node_inst = 0;
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ int ret = 0;
+
+ if (adev->aid_mask)
+ return amdgpu_umc_loop_all_aid(adev, func, data);
+
+ if (adev->umc.node_inst_num) {
+ LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) {
+ ret = func(adev, node_inst, umc_inst, ch_inst, data);
+ if (ret) {
+ dev_err(adev->dev, "Node %d umc %d ch %d func returns %d\n",
+ node_inst, umc_inst, ch_inst, ret);
+ return ret;
+ }
+ }
+ } else {
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ ret = func(adev, 0, umc_inst, ch_inst, data);
+ if (ret) {
+ dev_err(adev->dev, "Umc %d ch %d func returns %d\n",
+ umc_inst, ch_inst, ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
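
The loop helpers above expect a umc_func callback; a minimal sketch, with the per-channel register work left as a placeholder:

/* Sketch: callback invoked once per (node, umc, channel) instance. */
static int umc_clear_error_status(struct amdgpu_device *adev, uint32_t node_inst,
				  uint32_t umc_inst, uint32_t ch_inst, void *data)
{
	/* compute the per-channel register offset and clear its status here */
	return 0;
}

/* ... then, from a query or clear path ... */
amdgpu_umc_loop_channels(adev, umc_clear_error_status, NULL);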
+
+int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
+ uint64_t status, uint64_t ipid, uint64_t addr)
+{
+ if (adev->umc.ras->update_ecc_status)
+ return adev->umc.ras->update_ecc_status(adev,
+ status, ipid, addr);
+ return 0;
+}
+
+int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
+ struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_ecc_log_info *ecc_log;
+ int ret;
+
+ ecc_log = &con->umc_ecc_log;
+
+ mutex_lock(&ecc_log->lock);
+ ret = radix_tree_insert(ecc_tree, ecc_err->pa_pfn, ecc_err);
+ if (!ret)
+ radix_tree_tag_set(ecc_tree,
+ ecc_err->pa_pfn, UMC_ECC_NEW_DETECTED_TAG);
+ mutex_unlock(&ecc_log->lock);
+
+ return ret;
+}
+
+int amdgpu_umc_pages_in_a_row(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t pa_addr)
+{
+ struct ta_ras_query_address_output addr_out;
+
+ /* reinit err_data */
+ err_data->err_addr_cnt = 0;
+ err_data->err_addr_len = adev->umc.retire_unit;
+
+ addr_out.pa.pa = pa_addr;
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr)
+ return adev->umc.ras->convert_ras_err_addr(adev, err_data, NULL,
+ &addr_out, false);
+ else
+ return -EINVAL;
+}
+
+int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
+ uint64_t pa_addr, uint64_t *pfns, int len)
+{
+ int i, ret;
+ struct ras_err_data err_data;
+
+ err_data.err_addr = kcalloc(adev->umc.retire_unit,
+ sizeof(struct eeprom_table_record), GFP_KERNEL);
+ if (!err_data.err_addr) {
+ dev_warn(adev->dev, "Failed to alloc memory in bad page lookup!\n");
+ return 0;
+ }
+
+ ret = amdgpu_umc_pages_in_a_row(adev, &err_data, pa_addr);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < adev->umc.retire_unit; i++) {
+ if (i >= len)
+ goto out;
+
+ pfns[i] = err_data.err_addr[i].retired_page;
+ }
+ ret = i;
+ adev->umc.err_addr_cnt = err_data.err_addr_cnt;
+
+out:
+ kfree(err_data.err_addr);
+ return ret;
+}
+
+int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch, uint32_t umc,
+ uint32_t node, uint32_t socket,
+ struct ta_ras_query_address_output *addr_out, bool dump_addr)
+{
+ struct ta_ras_query_address_input addr_in;
+ int ret;
+
+ memset(&addr_in, 0, sizeof(addr_in));
+ addr_in.ma.err_addr = err_addr;
+ addr_in.ma.ch_inst = ch;
+ addr_in.ma.umc_inst = umc;
+ addr_in.ma.node_inst = node;
+ addr_in.ma.socket_id = socket;
+
+ if (adev->umc.ras && adev->umc.ras->convert_ras_err_addr) {
+ ret = adev->umc.ras->convert_ras_err_addr(adev, NULL, &addr_in,
+ addr_out, dump_addr);
+ if (ret)
+ return ret;
+ } else {
+ return 0;
+ }
+
+ return 0;
+}
+
+int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
+ uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps)
+{
+ struct ta_ras_query_address_input addr_in;
+ struct ta_ras_query_address_output addr_out;
+ int ret;
+
+ /* nps: the memory partition (NPS) mode the pa belongs to */
+ addr_in.pa.pa = pa | ((uint64_t)nps << 58);
+ addr_in.addr_type = TA_RAS_PA_TO_MCA;
+ ret = psp_ras_query_address(&adev->psp, &addr_in, &addr_out);
+ if (ret) {
+ dev_warn(adev->dev, "Failed to query RAS MCA address for 0x%llx",
+ pa);
+
+ return ret;
+ }
+
+ *mca = addr_out.ma.err_addr;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
new file mode 100644
index 000000000000..ec203f9e5ffa
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_UMC_H__
+#define __AMDGPU_UMC_H__
+#include "amdgpu_ras.h"
+#include "amdgpu_mca.h"
+/*
+ * (addr / 256) * 4096, the higher 26 bits in ErrorAddr
+ * are the index of the 4KB block
+ */
+#define ADDR_OF_4KB_BLOCK(addr) (((addr) & ~0xffULL) << 4)
+/*
+ * (addr / 256) * 8192, the higher 26 bits in ErrorAddr
+ * are the index of the 8KB block
+ */
+#define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5)
+/*
+ * (addr / 256) * 32768, the higher 26 bits in ErrorAddr
+ * are the index of the 32KB block
+ */
+#define ADDR_OF_32KB_BLOCK(addr) (((addr) & ~0xffULL) << 7)
+/* channel index is the index of 256B block */
+#define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8)
+/* offset in 256B block */
+#define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL)
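
These macros are normally composed into a retired physical address; a sketch assuming an 8KB block interleave (the block size actually used depends on the UMC generation):

/* Sketch: UMC error address + channel index -> system physical address. */
uint64_t retired_page = ADDR_OF_8KB_BLOCK(err_addr) |
			ADDR_OF_256B_BLOCK(channel_index) |
			OFFSET_IN_256B_BLOCK(err_addr);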
+
+#define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++)
+#define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++)
+#define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst))
+
+#define LOOP_UMC_NODE_INST(node_inst) \
+ for_each_set_bit((node_inst), &(adev->umc.active_mask), adev->umc.node_inst_num)
+
+#define LOOP_UMC_EACH_NODE_INST_AND_CH(node_inst, umc_inst, ch_inst) \
+ LOOP_UMC_NODE_INST((node_inst)) LOOP_UMC_INST_AND_CH((umc_inst), (ch_inst))
+
+/* Page retirement tag */
+#define UMC_ECC_NEW_DETECTED_TAG 0x1
+/*
+ * a flag to indicate v2 of channel index stored in eeprom
+ *
+ * v1 (legacy way): store channel index within a umc instance in eeprom
+ * range in UMC v12: 0 ~ 7
+ * v2: store global channel index in eeprom
+ * range in UMC v12: 0 ~ 127
+ *
+ * NOTE: it's better to store it in eeprom_table_record.mem_channel,
+ * but there are only 8 bits in mem_channel, and the channel number may
+ * increase in the future, so we decided to save it in
+ * eeprom_table_record.retired_page. retired_page is useless in v2,
+ * we depend on eeprom_table_record.address instead of retired_page in v2.
+ * Only 48 bits are saved on eeprom, use bit 47 here.
+ */
+#define UMC_CHANNEL_IDX_V2 BIT_ULL(47)
+
+/*
+ * save nps value to eeprom_table_record.retired_page[47:40],
+ * the channel index flag above will be retired.
+ */
+#define UMC_NPS_SHIFT 40
+#define UMC_NPS_MASK 0xffULL
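
A sketch of how the NPS value would be packed into retired_page[47:40] per the comment above; rec and nps are assumed locals:

/* Sketch: stash the NPS mode alongside the retired page value. */
rec->retired_page |= ((uint64_t)nps & UMC_NPS_MASK) << UMC_NPS_SHIFT;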
+
+/* three column bits and one row bit of the MCA address are flipped
+ * during bad page retirement
+ */
+#define RETIRE_FLIP_BITS_NUM 4
+
+struct amdgpu_umc_flip_bits {
+ uint32_t flip_bits_in_pa[RETIRE_FLIP_BITS_NUM];
+ uint32_t flip_row_bit;
+ uint32_t r13_in_pa;
+ uint32_t bit_num;
+};
+
+typedef int (*umc_func)(struct amdgpu_device *adev, uint32_t node_inst,
+ uint32_t umc_inst, uint32_t ch_inst, void *data);
+
+struct amdgpu_umc_ras {
+ struct amdgpu_ras_block_object ras_block;
+ void (*err_cnt_init)(struct amdgpu_device *adev);
+ bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
+ void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev,
+ void *ras_error_status);
+ bool (*check_ecc_err_status)(struct amdgpu_device *adev,
+ enum amdgpu_mca_error_type type, void *ras_error_status);
+ int (*update_ecc_status)(struct amdgpu_device *adev,
+ uint64_t status, uint64_t ipid, uint64_t addr);
+ int (*convert_ras_err_addr)(struct amdgpu_device *adev,
+ struct ras_err_data *err_data,
+ struct ta_ras_query_address_input *addr_in,
+ struct ta_ras_query_address_output *addr_out,
+ bool dump_addr);
+ uint32_t (*get_die_id_from_pa)(struct amdgpu_device *adev,
+ uint64_t mca_addr, uint64_t retired_page);
+ void (*get_retire_flip_bits)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_umc_funcs {
+ void (*init_registers)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_umc {
+ /* max error count in one ras query call */
+ uint32_t max_ras_err_cnt_per_query;
+ /* number of umc channel instance with memory map register access */
+ uint32_t channel_inst_num;
+ /* number of umc instance with memory map register access */
+ uint32_t umc_inst_num;
+
+ /* Total number of umc node instance including harvest one */
+ uint32_t node_inst_num;
+
+ /* UMC register per channel offset */
+ uint32_t channel_offs;
+ /* how many pages are retired in one UE */
+ uint32_t retire_unit;
+ /* channel index table of interleaved memory */
+ const uint32_t *channel_idx_tbl;
+ struct ras_common_if *ras_if;
+
+ const struct amdgpu_umc_funcs *funcs;
+ struct amdgpu_umc_ras *ras;
+
+ /* active mask for umc node instance */
+ unsigned long active_mask;
+
+ struct amdgpu_umc_flip_bits flip_bits;
+
+ unsigned long err_addr_cnt;
+};
+
+int amdgpu_umc_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block);
+int amdgpu_umc_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint32_t reset);
+int amdgpu_umc_pasid_poison_handler(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block, uint16_t pasid,
+ pasid_notify pasid_fn, void *data, uint32_t reset);
+int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_umc_fill_error_record(struct ras_err_data *err_data,
+ uint64_t err_addr,
+ uint64_t retired_page,
+ uint32_t channel_index,
+ uint32_t umc_inst);
+
+int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
+ void *ras_error_status,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_umc_page_retirement_mca(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch_inst, uint32_t umc_inst);
+
+int amdgpu_umc_loop_channels(struct amdgpu_device *adev,
+ umc_func func, void *data);
+
+int amdgpu_umc_update_ecc_status(struct amdgpu_device *adev,
+ uint64_t status, uint64_t ipid, uint64_t addr);
+int amdgpu_umc_logs_ecc_err(struct amdgpu_device *adev,
+ struct radix_tree_root *ecc_tree, struct ras_ecc_err *ecc_err);
+
+void amdgpu_umc_handle_bad_pages(struct amdgpu_device *adev,
+ void *ras_error_status);
+int amdgpu_umc_pages_in_a_row(struct amdgpu_device *adev,
+ struct ras_err_data *err_data, uint64_t pa_addr);
+int amdgpu_umc_lookup_bad_pages_in_a_row(struct amdgpu_device *adev,
+ uint64_t pa_addr, uint64_t *pfns, int len);
+int amdgpu_umc_mca_to_addr(struct amdgpu_device *adev,
+ uint64_t err_addr, uint32_t ch, uint32_t umc,
+ uint32_t node, uint32_t socket,
+ struct ta_ras_query_address_output *addr_out, bool dump_addr);
+int amdgpu_umc_pa2mca(struct amdgpu_device *adev,
+ uint64_t pa, uint64_t *mca, enum amdgpu_memory_partition nps);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
new file mode 100644
index 000000000000..5b27fc41ffbf
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/ioctl.h>
+
+/*
+ * MMIO debugfs IOCTL structure
+ */
+struct amdgpu_debugfs_regs2_iocdata {
+ __u32 use_srbm, use_grbm, pg_lock;
+ struct {
+ __u32 se, sh, instance;
+ } grbm;
+ struct {
+ __u32 me, pipe, queue, vmid;
+ } srbm;
+};
+
+struct amdgpu_debugfs_regs2_iocdata_v2 {
+ __u32 use_srbm, use_grbm, pg_lock;
+ struct {
+ __u32 se, sh, instance;
+ } grbm;
+ struct {
+ __u32 me, pipe, queue, vmid;
+ } srbm;
+ u32 xcc_id;
+};
+
+struct amdgpu_debugfs_gprwave_iocdata {
+ u32 gpr_or_wave, se, sh, cu, wave, simd, xcc_id;
+ struct {
+ u32 thread, vpgr_or_sgpr;
+ } gpr;
+};
+
+/*
+ * MMIO debugfs state data (per file* handle)
+ */
+struct amdgpu_debugfs_regs2_data {
+ struct amdgpu_device *adev;
+ struct mutex lock;
+ struct amdgpu_debugfs_regs2_iocdata_v2 id;
+};
+
+struct amdgpu_debugfs_gprwave_data {
+ struct amdgpu_device *adev;
+ struct mutex lock;
+ struct amdgpu_debugfs_gprwave_iocdata id;
+};
+
+enum AMDGPU_DEBUGFS_REGS2_CMDS {
+ AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE = 0,
+ AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE_V2,
+};
+
+enum AMDGPU_DEBUGFS_GPRWAVE_CMDS {
+ AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE = 0,
+};
+
+//reg2 interface
+#define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE _IOWR(0x20, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE, struct amdgpu_debugfs_regs2_iocdata)
+#define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE_V2 _IOWR(0x20, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE_V2, struct amdgpu_debugfs_regs2_iocdata_v2)
+
+//gprwave interface
+#define AMDGPU_DEBUGFS_GPRWAVE_IOC_SET_STATE _IOWR(0x20, AMDGPU_DEBUGFS_GPRWAVE_CMD_SET_STATE, struct amdgpu_debugfs_gprwave_iocdata)
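
For reference, a userspace sketch of driving the regs2 interface; the debugfs path, the O_RDWR open and the pread()-based register read are assumptions about how the file is typically used (the structures and ioctl numbers come from the header above):

/* Userspace sketch (illustrative): select a GRBM bank, then read a register. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

static uint32_t read_reg_sketch(uint32_t reg_byte_offset)
{
	struct amdgpu_debugfs_regs2_iocdata id = {
		.use_grbm = 1,
		.grbm = { .se = 0, .sh = 0, .instance = 0 },
	};
	uint32_t value = 0;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs2", O_RDWR);

	if (fd >= 0 && !ioctl(fd, AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE, &id))
		pread(fd, &value, sizeof(value), reg_byte_offset);
	if (fd >= 0)
		close(fd);
	return value;
}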
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
new file mode 100644
index 000000000000..cd707d70a0bf
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <drm/drm_exec.h>
+#include <drm/drm_drv.h>
+
+#include "amdgpu.h"
+#include "amdgpu_umsch_mm.h"
+#include "umsch_mm_v4_0.h"
+
+MODULE_FIRMWARE("amdgpu/umsch_mm_4_0_0.bin");
+
+int amdgpu_umsch_mm_submit_pkt(struct amdgpu_umsch_mm *umsch, void *pkt, int ndws)
+{
+ struct amdgpu_ring *ring = &umsch->ring;
+
+ if (amdgpu_ring_alloc(ring, ndws))
+ return -ENOMEM;
+
+ amdgpu_ring_write_multiple(ring, pkt, ndws);
+ amdgpu_ring_commit(ring);
+
+ return 0;
+}
+
+int amdgpu_umsch_mm_query_fence(struct amdgpu_umsch_mm *umsch)
+{
+ struct amdgpu_ring *ring = &umsch->ring;
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, adev->usec_timeout);
+ if (r < 1) {
+ dev_err(adev->dev, "ring umsch timeout, emitted fence %u\n",
+ ring->fence_drv.sync_seq);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void umsch_mm_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_umsch_mm *umsch = (struct amdgpu_umsch_mm *)ring;
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->use_doorbell)
+ WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
+ else
+ WREG32(umsch->rb_wptr, ring->wptr << 2);
+}
+
+static u64 umsch_mm_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_umsch_mm *umsch = (struct amdgpu_umsch_mm *)ring;
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32(umsch->rb_rptr);
+}
+
+static u64 umsch_mm_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_umsch_mm *umsch = (struct amdgpu_umsch_mm *)ring;
+ struct amdgpu_device *adev = ring->adev;
+
+ return RREG32(umsch->rb_wptr);
+}
+
+static const struct amdgpu_ring_funcs umsch_v4_0_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_UMSCH_MM,
+ .align_mask = 0,
+ .nop = 0,
+ .support_64bit_ptrs = false,
+ .get_rptr = umsch_mm_ring_get_rptr,
+ .get_wptr = umsch_mm_ring_get_wptr,
+ .set_wptr = umsch_mm_ring_set_wptr,
+ .insert_nop = amdgpu_ring_insert_nop,
+};
+
+int amdgpu_umsch_mm_ring_init(struct amdgpu_umsch_mm *umsch)
+{
+ struct amdgpu_device *adev = container_of(umsch, struct amdgpu_device, umsch_mm);
+ struct amdgpu_ring *ring = &umsch->ring;
+
+ ring->vm_hub = AMDGPU_MMHUB0(0);
+ ring->use_doorbell = true;
+ ring->no_scheduler = true;
+ ring->doorbell_index = (AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1) + 6;
+
+ snprintf(ring->name, sizeof(ring->name), "umsch");
+
+ return amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT, NULL);
+}
+
+int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch)
+{
+ const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr;
+ struct amdgpu_device *adev = umsch->ring.adev;
+ const char *fw_name = NULL;
+ int r;
+
+ switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
+ case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
+ fw_name = "4_0_0";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ r = amdgpu_ucode_request(adev, &adev->umsch_mm.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/umsch_mm_%s.bin", fw_name);
+ if (r) {
+ release_firmware(adev->umsch_mm.fw);
+ adev->umsch_mm.fw = NULL;
+ return r;
+ }
+
+ umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)adev->umsch_mm.fw->data;
+
+ adev->umsch_mm.ucode_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes);
+ adev->umsch_mm.data_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes);
+
+ adev->umsch_mm.irq_start_addr =
+ le32_to_cpu(umsch_mm_hdr->umsch_mm_irq_start_addr_lo) |
+ ((uint64_t)(le32_to_cpu(umsch_mm_hdr->umsch_mm_irq_start_addr_hi)) << 32);
+ adev->umsch_mm.uc_start_addr =
+ le32_to_cpu(umsch_mm_hdr->umsch_mm_uc_start_addr_lo) |
+ ((uint64_t)(le32_to_cpu(umsch_mm_hdr->umsch_mm_uc_start_addr_hi)) << 32);
+ adev->umsch_mm.data_start_addr =
+ le32_to_cpu(umsch_mm_hdr->umsch_mm_data_start_addr_lo) |
+ ((uint64_t)(le32_to_cpu(umsch_mm_hdr->umsch_mm_data_start_addr_hi)) << 32);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ struct amdgpu_firmware_info *info;
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_UMSCH_MM_UCODE];
+ info->ucode_id = AMDGPU_UCODE_ID_UMSCH_MM_UCODE;
+ info->fw = adev->umsch_mm.fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes), PAGE_SIZE);
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_UMSCH_MM_DATA];
+ info->ucode_id = AMDGPU_UCODE_ID_UMSCH_MM_DATA;
+ info->fw = adev->umsch_mm.fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes), PAGE_SIZE);
+ }
+
+ return 0;
+}
+
+int amdgpu_umsch_mm_allocate_ucode_buffer(struct amdgpu_umsch_mm *umsch)
+{
+ const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr;
+ struct amdgpu_device *adev = umsch->ring.adev;
+ const __le32 *fw_data;
+ uint32_t fw_size;
+ int r;
+
+ umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)
+ adev->umsch_mm.fw->data;
+
+ fw_data = (const __le32 *)(adev->umsch_mm.fw->data +
+ le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_offset_bytes));
+ fw_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes);
+
+ r = amdgpu_bo_create_reserved(adev, fw_size,
+ 4 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->umsch_mm.ucode_fw_obj,
+ &adev->umsch_mm.ucode_fw_gpu_addr,
+ (void **)&adev->umsch_mm.ucode_fw_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create umsch_mm fw ucode bo\n", r);
+ return r;
+ }
+
+ memcpy(adev->umsch_mm.ucode_fw_ptr, fw_data, fw_size);
+
+ amdgpu_bo_kunmap(adev->umsch_mm.ucode_fw_obj);
+ amdgpu_bo_unreserve(adev->umsch_mm.ucode_fw_obj);
+ return 0;
+}
+
+int amdgpu_umsch_mm_allocate_ucode_data_buffer(struct amdgpu_umsch_mm *umsch)
+{
+ const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr;
+ struct amdgpu_device *adev = umsch->ring.adev;
+ const __le32 *fw_data;
+ uint32_t fw_size;
+ int r;
+
+ umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)
+ adev->umsch_mm.fw->data;
+
+ fw_data = (const __le32 *)(adev->umsch_mm.fw->data +
+ le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_offset_bytes));
+ fw_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes);
+
+ r = amdgpu_bo_create_reserved(adev, fw_size,
+ 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->umsch_mm.data_fw_obj,
+ &adev->umsch_mm.data_fw_gpu_addr,
+ (void **)&adev->umsch_mm.data_fw_ptr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to create umsch_mm fw data bo\n", r);
+ return r;
+ }
+
+ memcpy(adev->umsch_mm.data_fw_ptr, fw_data, fw_size);
+
+ amdgpu_bo_kunmap(adev->umsch_mm.data_fw_obj);
+ amdgpu_bo_unreserve(adev->umsch_mm.data_fw_obj);
+ return 0;
+}
+
+int amdgpu_umsch_mm_psp_execute_cmd_buf(struct amdgpu_umsch_mm *umsch)
+{
+ struct amdgpu_device *adev = umsch->ring.adev;
+ struct amdgpu_firmware_info ucode = {
+ .ucode_id = AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER,
+ .mc_addr = adev->umsch_mm.cmd_buf_gpu_addr,
+ .ucode_size = ((uintptr_t)adev->umsch_mm.cmd_buf_curr_ptr -
+ (uintptr_t)adev->umsch_mm.cmd_buf_ptr),
+ };
+
+ return psp_execute_ip_fw_load(&adev->psp, &ucode);
+}
+
+static void umsch_mm_agdb_index_init(struct amdgpu_device *adev)
+{
+ uint32_t umsch_mm_agdb_start;
+ int i;
+
+ umsch_mm_agdb_start = adev->doorbell_index.max_assignment + 1;
+ umsch_mm_agdb_start = roundup(umsch_mm_agdb_start, 1024);
+ umsch_mm_agdb_start += (AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1);
+
+ for (i = 0; i < CONTEXT_PRIORITY_NUM_LEVELS; i++)
+ adev->umsch_mm.agdb_index[i] = umsch_mm_agdb_start + i;
+}
+
+static int umsch_mm_init(struct amdgpu_device *adev)
+{
+ int r;
+
+ adev->umsch_mm.vmid_mask_mm_vpe = 0xf00;
+ adev->umsch_mm.engine_mask = (1 << UMSCH_SWIP_ENGINE_TYPE_VPE);
+ adev->umsch_mm.vpe_hqd_mask = 0xfe;
+
+ r = amdgpu_device_wb_get(adev, &adev->umsch_mm.wb_index);
+ if (r) {
+ dev_err(adev->dev, "failed to alloc wb for umsch: %d\n", r);
+ return r;
+ }
+
+ adev->umsch_mm.sch_ctx_gpu_addr = adev->wb.gpu_addr +
+ (adev->umsch_mm.wb_index * 4);
+
+ r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->umsch_mm.cmd_buf_obj,
+ &adev->umsch_mm.cmd_buf_gpu_addr,
+ (void **)&adev->umsch_mm.cmd_buf_ptr);
+ if (r) {
+ dev_err(adev->dev, "failed to allocate cmdbuf bo %d\n", r);
+ amdgpu_device_wb_free(adev, adev->umsch_mm.wb_index);
+ return r;
+ }
+
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_UMSCHFW_LOG_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->umsch_mm.dbglog_bo,
+ &adev->umsch_mm.log_gpu_addr,
+ &adev->umsch_mm.log_cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate umsch debug bo\n", r);
+ return r;
+ }
+
+ mutex_init(&adev->umsch_mm.mutex_hidden);
+
+ umsch_mm_agdb_index_init(adev);
+
+ return 0;
+}
+
+
+static int umsch_mm_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
+ case IP_VERSION(4, 0, 5):
+ case IP_VERSION(4, 0, 6):
+ umsch_mm_v4_0_set_funcs(&adev->umsch_mm);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ adev->umsch_mm.ring.funcs = &umsch_v4_0_ring_funcs;
+ umsch_mm_set_regs(&adev->umsch_mm);
+
+ return 0;
+}
+
+static int umsch_mm_late_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend)
+ return 0;
+
+ return 0;
+}
+
+static int umsch_mm_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = umsch_mm_init(adev);
+ if (r)
+ return r;
+
+ amdgpu_umsch_fwlog_init(&adev->umsch_mm);
+ r = umsch_mm_ring_init(&adev->umsch_mm);
+ if (r)
+ return r;
+
+ r = umsch_mm_init_microcode(&adev->umsch_mm);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int umsch_mm_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ release_firmware(adev->umsch_mm.fw);
+ adev->umsch_mm.fw = NULL;
+
+ amdgpu_ring_fini(&adev->umsch_mm.ring);
+
+ mutex_destroy(&adev->umsch_mm.mutex_hidden);
+
+ amdgpu_bo_free_kernel(&adev->umsch_mm.cmd_buf_obj,
+ &adev->umsch_mm.cmd_buf_gpu_addr,
+ (void **)&adev->umsch_mm.cmd_buf_ptr);
+
+ amdgpu_bo_free_kernel(&adev->umsch_mm.dbglog_bo,
+ &adev->umsch_mm.log_gpu_addr,
+ (void **)&adev->umsch_mm.log_cpu_addr);
+
+ amdgpu_device_wb_free(adev, adev->umsch_mm.wb_index);
+
+ return 0;
+}
+
+static int umsch_mm_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = umsch_mm_load_microcode(&adev->umsch_mm);
+ if (r)
+ return r;
+
+ umsch_mm_ring_start(&adev->umsch_mm);
+
+ r = umsch_mm_set_hw_resources(&adev->umsch_mm);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int umsch_mm_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ umsch_mm_ring_stop(&adev->umsch_mm);
+
+ amdgpu_bo_free_kernel(&adev->umsch_mm.data_fw_obj,
+ &adev->umsch_mm.data_fw_gpu_addr,
+ (void **)&adev->umsch_mm.data_fw_ptr);
+
+ amdgpu_bo_free_kernel(&adev->umsch_mm.ucode_fw_obj,
+ &adev->umsch_mm.ucode_fw_gpu_addr,
+ (void **)&adev->umsch_mm.ucode_fw_ptr);
+ return 0;
+}
+
+static int umsch_mm_suspend(struct amdgpu_ip_block *ip_block)
+{
+ return umsch_mm_hw_fini(ip_block);
+}
+
+static int umsch_mm_resume(struct amdgpu_ip_block *ip_block)
+{
+ return umsch_mm_hw_init(ip_block);
+}
+
+void amdgpu_umsch_fwlog_init(struct amdgpu_umsch_mm *umsch_mm)
+{
+#if defined(CONFIG_DEBUG_FS)
+ void *fw_log_cpu_addr = umsch_mm->log_cpu_addr;
+ volatile struct amdgpu_umsch_fwlog *log_buf = fw_log_cpu_addr;
+
+ log_buf->header_size = sizeof(struct amdgpu_umsch_fwlog);
+ log_buf->buffer_size = AMDGPU_UMSCHFW_LOG_SIZE;
+ log_buf->rptr = log_buf->header_size;
+ log_buf->wptr = log_buf->header_size;
+ log_buf->wrapped = 0;
+#endif
+}
+
+/*
+ * debugfs entry for reading the umsch firmware log buffer.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static ssize_t amdgpu_debugfs_umsch_fwlog_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_umsch_mm *umsch_mm;
+ void *log_buf;
+ volatile struct amdgpu_umsch_fwlog *plog;
+ unsigned int read_pos, write_pos, available, i, read_bytes = 0;
+ unsigned int read_num[2] = {0};
+
+ umsch_mm = file_inode(f)->i_private;
+ if (!umsch_mm)
+ return -ENODEV;
+
+ if (!umsch_mm->log_cpu_addr)
+ return -EFAULT;
+
+ log_buf = umsch_mm->log_cpu_addr;
+
+ plog = (volatile struct amdgpu_umsch_fwlog *)log_buf;
+ read_pos = plog->rptr;
+ write_pos = plog->wptr;
+
+ if (read_pos > AMDGPU_UMSCHFW_LOG_SIZE || write_pos > AMDGPU_UMSCHFW_LOG_SIZE)
+ return -EFAULT;
+
+ if (!size || (read_pos == write_pos))
+ return 0;
+
+ if (write_pos > read_pos) {
+ available = write_pos - read_pos;
+ read_num[0] = min_t(size_t, size, available);
+ } else {
+ read_num[0] = AMDGPU_UMSCHFW_LOG_SIZE - read_pos;
+ available = read_num[0] + write_pos - plog->header_size;
+ if (size > available)
+ read_num[1] = write_pos - plog->header_size;
+ else if (size > read_num[0])
+ read_num[1] = size - read_num[0];
+ else
+ read_num[0] = size;
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (read_num[i]) {
+ if (read_pos == AMDGPU_UMSCHFW_LOG_SIZE)
+ read_pos = plog->header_size;
+ if (read_num[i] == copy_to_user((buf + read_bytes),
+ (log_buf + read_pos), read_num[i]))
+ return -EFAULT;
+
+ read_bytes += read_num[i];
+ read_pos += read_num[i];
+ }
+ }
+
+ plog->rptr = read_pos;
+ *pos += read_bytes;
+ return read_bytes;
+}
+
+static const struct file_operations amdgpu_debugfs_umschfwlog_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_umsch_fwlog_read,
+ .llseek = default_llseek
+};
+#endif
+
+void amdgpu_debugfs_umsch_fwlog_init(struct amdgpu_device *adev,
+ struct amdgpu_umsch_mm *umsch_mm)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ sprintf(name, "amdgpu_umsch_fwlog");
+ debugfs_create_file_size(name, S_IFREG | 0444, root, umsch_mm,
+ &amdgpu_debugfs_umschfwlog_fops,
+ AMDGPU_UMSCHFW_LOG_SIZE);
+#endif
+}
+
+static const struct amd_ip_funcs umsch_mm_v4_0_ip_funcs = {
+ .name = "umsch_mm_v4_0",
+ .early_init = umsch_mm_early_init,
+ .late_init = umsch_mm_late_init,
+ .sw_init = umsch_mm_sw_init,
+ .sw_fini = umsch_mm_sw_fini,
+ .hw_init = umsch_mm_hw_init,
+ .hw_fini = umsch_mm_hw_fini,
+ .suspend = umsch_mm_suspend,
+ .resume = umsch_mm_resume,
+};
+
+const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_UMSCH_MM,
+ .major = 4,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &umsch_mm_v4_0_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h
new file mode 100644
index 000000000000..2c771a753778
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umsch_mm.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_UMSCH_MM_H__
+#define __AMDGPU_UMSCH_MM_H__
+
+enum UMSCH_SWIP_ENGINE_TYPE {
+ UMSCH_SWIP_ENGINE_TYPE_VCN0 = 0,
+ UMSCH_SWIP_ENGINE_TYPE_VCN1 = 1,
+ UMSCH_SWIP_ENGINE_TYPE_VCN = 2,
+ UMSCH_SWIP_ENGINE_TYPE_VPE = 3,
+ UMSCH_SWIP_ENGINE_TYPE_MAX
+};
+
+enum UMSCH_CONTEXT_PRIORITY_LEVEL {
+ CONTEXT_PRIORITY_LEVEL_IDLE = 0,
+ CONTEXT_PRIORITY_LEVEL_NORMAL = 1,
+ CONTEXT_PRIORITY_LEVEL_FOCUS = 2,
+ CONTEXT_PRIORITY_LEVEL_REALTIME = 3,
+ CONTEXT_PRIORITY_NUM_LEVELS
+};
+
+struct umsch_mm_set_resource_input {
+ uint32_t vmid_mask_mm_vcn;
+ uint32_t vmid_mask_mm_vpe;
+ uint32_t collaboration_mask_vpe;
+ uint32_t logging_vmid;
+ uint32_t engine_mask;
+ union {
+ struct {
+ uint32_t disable_reset : 1;
+ uint32_t disable_umsch_mm_log : 1;
+ uint32_t use_rs64mem_for_proc_ctx_csa : 1;
+ uint32_t reserved : 29;
+ };
+ uint32_t uint32_all;
+ };
+};
+
+struct amdgpu_umsch_fwlog {
+ uint32_t rptr;
+ uint32_t wptr;
+ uint32_t buffer_size;
+ uint32_t header_size;
+ uint32_t wrapped;
+};
+
+struct umsch_mm_add_queue_input {
+ uint32_t process_id;
+ uint64_t page_table_base_addr;
+ uint64_t process_va_start;
+ uint64_t process_va_end;
+ uint64_t process_quantum;
+ uint64_t process_csa_addr;
+ uint64_t context_quantum;
+ uint64_t context_csa_addr;
+ uint32_t inprocess_context_priority;
+ enum UMSCH_CONTEXT_PRIORITY_LEVEL context_global_priority_level;
+ uint32_t doorbell_offset_0;
+ uint32_t doorbell_offset_1;
+ enum UMSCH_SWIP_ENGINE_TYPE engine_type;
+ uint32_t affinity;
+ uint64_t mqd_addr;
+ uint64_t h_context;
+ uint64_t h_queue;
+ uint32_t vm_context_cntl;
+
+ uint32_t process_csa_array_index;
+ uint32_t context_csa_array_index;
+
+ struct {
+ uint32_t is_context_suspended : 1;
+ uint32_t collaboration_mode : 1;
+ uint32_t reserved : 30;
+ };
+};
+
+struct umsch_mm_remove_queue_input {
+ uint32_t doorbell_offset_0;
+ uint32_t doorbell_offset_1;
+ uint64_t context_csa_addr;
+ uint32_t context_csa_array_index;
+};
+
+struct MQD_INFO {
+ uint32_t rb_base_hi;
+ uint32_t rb_base_lo;
+ uint32_t rb_size;
+ uint32_t wptr_val;
+ uint32_t rptr_val;
+ uint32_t unmapped;
+ uint32_t vmid;
+};
+
+struct amdgpu_umsch_mm;
+
+struct umsch_mm_funcs {
+ int (*set_hw_resources)(struct amdgpu_umsch_mm *umsch);
+ int (*add_queue)(struct amdgpu_umsch_mm *umsch,
+ struct umsch_mm_add_queue_input *input);
+ int (*remove_queue)(struct amdgpu_umsch_mm *umsch,
+ struct umsch_mm_remove_queue_input *input);
+ int (*set_regs)(struct amdgpu_umsch_mm *umsch);
+ int (*init_microcode)(struct amdgpu_umsch_mm *umsch);
+ int (*load_microcode)(struct amdgpu_umsch_mm *umsch);
+ int (*ring_init)(struct amdgpu_umsch_mm *umsch);
+ int (*ring_start)(struct amdgpu_umsch_mm *umsch);
+ int (*ring_stop)(struct amdgpu_umsch_mm *umsch);
+ int (*ring_fini)(struct amdgpu_umsch_mm *umsch);
+};
+
+struct amdgpu_umsch_mm {
+ struct amdgpu_ring ring;
+
+ uint32_t rb_wptr;
+ uint32_t rb_rptr;
+
+ const struct umsch_mm_funcs *funcs;
+
+ const struct firmware *fw;
+ uint32_t fw_version;
+ uint32_t feature_version;
+
+ struct amdgpu_bo *ucode_fw_obj;
+ uint64_t ucode_fw_gpu_addr;
+ uint32_t *ucode_fw_ptr;
+ uint64_t irq_start_addr;
+ uint64_t uc_start_addr;
+ uint32_t ucode_size;
+
+ struct amdgpu_bo *data_fw_obj;
+ uint64_t data_fw_gpu_addr;
+ uint32_t *data_fw_ptr;
+ uint64_t data_start_addr;
+ uint32_t data_size;
+
+ struct amdgpu_bo *cmd_buf_obj;
+ uint64_t cmd_buf_gpu_addr;
+ uint32_t *cmd_buf_ptr;
+ uint32_t *cmd_buf_curr_ptr;
+
+ uint32_t wb_index;
+ uint64_t sch_ctx_gpu_addr;
+ uint32_t *sch_ctx_cpu_addr;
+
+ uint32_t vmid_mask_mm_vcn;
+ uint32_t vmid_mask_mm_vpe;
+ uint32_t engine_mask;
+ uint32_t vcn0_hqd_mask;
+ uint32_t vcn1_hqd_mask;
+ uint32_t vcn_hqd_mask[2];
+ uint32_t vpe_hqd_mask;
+ uint32_t agdb_index[CONTEXT_PRIORITY_NUM_LEVELS];
+
+ struct mutex mutex_hidden;
+ struct amdgpu_bo *dbglog_bo;
+ void *log_cpu_addr;
+ uint64_t log_gpu_addr;
+ uint32_t mem_size;
+ uint32_t log_offset;
+};
+
+int amdgpu_umsch_mm_submit_pkt(struct amdgpu_umsch_mm *umsch, void *pkt, int ndws);
+int amdgpu_umsch_mm_query_fence(struct amdgpu_umsch_mm *umsch);
+
+int amdgpu_umsch_mm_init_microcode(struct amdgpu_umsch_mm *umsch);
+int amdgpu_umsch_mm_allocate_ucode_buffer(struct amdgpu_umsch_mm *umsch);
+int amdgpu_umsch_mm_allocate_ucode_data_buffer(struct amdgpu_umsch_mm *umsch);
+
+int amdgpu_umsch_mm_psp_execute_cmd_buf(struct amdgpu_umsch_mm *umsch);
+
+int amdgpu_umsch_mm_ring_init(struct amdgpu_umsch_mm *umsch);
+
+void amdgpu_debugfs_umsch_fwlog_init(struct amdgpu_device *adev,
+ struct amdgpu_umsch_mm *umsch);
+
+void amdgpu_umsch_fwlog_init(struct amdgpu_umsch_mm *umsch_mm);
+
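+/*
+ * When firmware is loaded through PSP, UMSCH register writes are staged as
+ * (offset, value) pairs in the command buffer for PSP to execute later;
+ * otherwise the register is written directly through MMIO.
+ */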
+#define WREG32_SOC15_UMSCH(reg, value) \
+ do { \
+ uint32_t reg_offset = adev->reg_offset[VCN_HWIP][0][reg##_BASE_IDX] + reg; \
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { \
+ *adev->umsch_mm.cmd_buf_curr_ptr++ = (reg_offset << 2); \
+ *adev->umsch_mm.cmd_buf_curr_ptr++ = value; \
+ } else { \
+ WREG32(reg_offset, value); \
+ } \
+ } while (0)
+
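+/*
+ * Dispatch wrappers into the version-specific funcs table; each one is a
+ * no-op returning 0 when the corresponding callback is not implemented.
+ */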
+#define umsch_mm_set_hw_resources(umsch) \
+ ((umsch)->funcs->set_hw_resources ? (umsch)->funcs->set_hw_resources((umsch)) : 0)
+#define umsch_mm_add_queue(umsch, input) \
+ ((umsch)->funcs->add_queue ? (umsch)->funcs->add_queue((umsch), (input)) : 0)
+#define umsch_mm_remove_queue(umsch, input) \
+ ((umsch)->funcs->remove_queue ? (umsch)->funcs->remove_queue((umsch), (input)) : 0)
+
+#define umsch_mm_set_regs(umsch) \
+ ((umsch)->funcs->set_regs ? (umsch)->funcs->set_regs((umsch)) : 0)
+#define umsch_mm_init_microcode(umsch) \
+ ((umsch)->funcs->init_microcode ? (umsch)->funcs->init_microcode((umsch)) : 0)
+#define umsch_mm_load_microcode(umsch) \
+ ((umsch)->funcs->load_microcode ? (umsch)->funcs->load_microcode((umsch)) : 0)
+
+#define umsch_mm_ring_init(umsch) \
+ ((umsch)->funcs->ring_init ? (umsch)->funcs->ring_init((umsch)) : 0)
+#define umsch_mm_ring_start(umsch) \
+ ((umsch)->funcs->ring_start ? (umsch)->funcs->ring_start((umsch)) : 0)
+#define umsch_mm_ring_stop(umsch) \
+ ((umsch)->funcs->ring_stop ? (umsch)->funcs->ring_stop((umsch)) : 0)
+#define umsch_mm_ring_fini(umsch) \
+ ((umsch)->funcs->ring_fini ? (umsch)->funcs->ring_fini((umsch)) : 0)
+
+static inline void amdgpu_umsch_mm_lock(struct amdgpu_umsch_mm *umsch)
+{
+ mutex_lock(&umsch->mutex_hidden);
+}
+
+static inline void amdgpu_umsch_mm_unlock(struct amdgpu_umsch_mm *umsch)
+{
+ mutex_unlock(&umsch->mutex_hidden);
+}
+
+extern const struct amdgpu_ip_block_version umsch_mm_v4_0_ip_block;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
new file mode 100644
index 000000000000..1add21160d21
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -0,0 +1,1090 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <drm/drm_auth.h>
+#include <drm/drm_exec.h>
+#include <linux/pm_runtime.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_userq.h"
+#include "amdgpu_userq_fence.h"
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
+{
+ int i;
+ u32 userq_ip_mask = 0;
+
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+ if (adev->userq_funcs[i])
+ userq_ip_mask |= (1 << i);
+ }
+
+ return userq_ip_mask;
+}
+
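+/*
+ * Check that a GPU VA range passed in from userspace (queue ring, rptr or
+ * wptr) is fully covered by an existing mapping in the process VM.
+ */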
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+ u64 expected_size)
+{
+ struct amdgpu_bo_va_mapping *va_map;
+ u64 user_addr;
+ u64 size;
+ int r = 0;
+
+ user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT;
+ size = expected_size >> AMDGPU_GPU_PAGE_SHIFT;
+
+ r = amdgpu_bo_reserve(vm->root.bo, false);
+ if (r)
+ return r;
+
+ va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
+ if (!va_map) {
+ r = -EINVAL;
+ goto out_err;
+ }
+	/* Only check that the userq range is fully resident within the VM mapping */
+ if (user_addr >= va_map->start &&
+ va_map->last - user_addr + 1 >= size) {
+ amdgpu_bo_unreserve(vm->root.bo);
+ return 0;
+ }
+
+ r = -EINVAL;
+out_err:
+ amdgpu_bo_unreserve(vm->root.bo);
+ return r;
+}
+
+static int
+amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_MAPPED) {
+ r = userq_funcs->preempt(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_PREEMPTED;
+ }
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_restore_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) {
+ r = userq_funcs->restore(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) ||
+ (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) {
+ r = userq_funcs->unmap(uq_mgr, queue);
+ if (r)
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ else
+ queue->state = AMDGPU_USERQ_STATE_UNMAPPED;
+ }
+ return r;
+}
+
+static int
+amdgpu_userq_map_helper(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *userq_funcs =
+ adev->userq_funcs[queue->queue_type];
+ int r = 0;
+
+ if (queue->state == AMDGPU_USERQ_STATE_UNMAPPED) {
+ r = userq_funcs->map(uq_mgr, queue);
+ if (r) {
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ } else {
+ queue->state = AMDGPU_USERQ_STATE_MAPPED;
+ }
+ }
+ return r;
+}
+
+static void
+amdgpu_userq_wait_for_last_fence(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue)
+{
+ struct dma_fence *f = queue->last_fence;
+ int ret;
+
+ if (f && !dma_fence_is_signaled(f)) {
+ ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+ if (ret <= 0)
+ drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
+ f->context, f->seqno);
+ }
+}
+
+static void
+amdgpu_userq_cleanup(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue,
+ int queue_id)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
+
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ amdgpu_userq_fence_driver_free(queue);
+ idr_remove(&uq_mgr->userq_idr, queue_id);
+ kfree(queue);
+}
+
+static struct amdgpu_usermode_queue *
+amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
+{
+ return idr_find(&uq_mgr->userq_idr, qid);
+}
+
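+/*
+ * Make sure a valid (unsignaled) eviction fence exists for this process:
+ * flush any pending resume work first and, if the fence is still missing
+ * or already signaled, schedule resume work and retry. Returns with
+ * uq_mgr->userq_mutex held.
+ */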
+void
+amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct amdgpu_eviction_fence *ev_fence;
+
+retry:
+ /* Flush any pending resume work to create ev_fence */
+ flush_delayed_work(&uq_mgr->resume_work);
+
+ mutex_lock(&uq_mgr->userq_mutex);
+ spin_lock(&evf_mgr->ev_fence_lock);
+ ev_fence = evf_mgr->ev_fence;
+ spin_unlock(&evf_mgr->ev_fence_lock);
+ if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
+ mutex_unlock(&uq_mgr->userq_mutex);
+ /*
+ * Looks like there was no pending resume work,
+ * add one now to create a valid eviction fence
+ */
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+ goto retry;
+ }
+}
+
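+/*
+ * Allocate a kernel BO in GTT for a user queue metadata object, bind it
+ * into GART, kmap it for CPU access and zero its contents.
+ */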
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj,
+ int size)
+{
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_bo_param bp;
+ int r;
+
+ memset(&bp, 0, sizeof(bp));
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+ bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ bp.type = ttm_bo_type_kernel;
+ bp.size = size;
+ bp.resv = NULL;
+ bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+ r = amdgpu_bo_create(adev, &bp, &userq_obj->obj);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to allocate BO for userqueue (%d)", r);
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(userq_obj->obj, true);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to reserve BO to map (%d)", r);
+ goto free_obj;
+ }
+
+ r = amdgpu_ttm_alloc_gart(&(userq_obj->obj)->tbo);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to alloc GART for userqueue object (%d)", r);
+ goto unresv;
+ }
+
+ r = amdgpu_bo_kmap(userq_obj->obj, &userq_obj->cpu_ptr);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to map BO for userqueue (%d)", r);
+ goto unresv;
+ }
+
+ userq_obj->gpu_addr = amdgpu_bo_gpu_offset(userq_obj->obj);
+ amdgpu_bo_unreserve(userq_obj->obj);
+ memset(userq_obj->cpu_ptr, 0, size);
+ return 0;
+
+unresv:
+ amdgpu_bo_unreserve(userq_obj->obj);
+
+free_obj:
+ amdgpu_bo_unref(&userq_obj->obj);
+ return r;
+}
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj)
+{
+ amdgpu_bo_kunmap(userq_obj->obj);
+ amdgpu_bo_unref(&userq_obj->obj);
+}
+
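+/*
+ * Look up and pin the doorbell BO referenced by the queue, then translate
+ * the queue's doorbell offset into an absolute index on the doorbell BAR.
+ * The BO stays pinned until the queue is destroyed.
+ */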
+uint64_t
+amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_db_info *db_info,
+ struct drm_file *filp)
+{
+ uint64_t index;
+ struct drm_gem_object *gobj;
+ struct amdgpu_userq_obj *db_obj = db_info->db_obj;
+ int r, db_size;
+
+ gobj = drm_gem_object_lookup(filp, db_info->doorbell_handle);
+ if (gobj == NULL) {
+ drm_file_err(uq_mgr->file, "Can't find GEM object for doorbell\n");
+ return -EINVAL;
+ }
+
+ db_obj->obj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+ drm_gem_object_put(gobj);
+
+ r = amdgpu_bo_reserve(db_obj->obj, true);
+ if (r) {
+		drm_file_err(uq_mgr->file, "[Usermode queues] Failed to reserve doorbell object\n");
+ goto unref_bo;
+ }
+
+ /* Pin the BO before generating the index, unpin in queue destroy */
+ r = amdgpu_bo_pin(db_obj->obj, AMDGPU_GEM_DOMAIN_DOORBELL);
+ if (r) {
+ drm_file_err(uq_mgr->file, "[Usermode queues] Failed to pin doorbell object\n");
+ goto unresv_bo;
+ }
+
+ switch (db_info->queue_type) {
+ case AMDGPU_HW_IP_GFX:
+ case AMDGPU_HW_IP_COMPUTE:
+ case AMDGPU_HW_IP_DMA:
+ db_size = sizeof(u64);
+ break;
+
+ case AMDGPU_HW_IP_VCN_ENC:
+ db_size = sizeof(u32);
+ db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VCN0_1 << 1;
+ break;
+
+ case AMDGPU_HW_IP_VPE:
+ db_size = sizeof(u32);
+ db_info->doorbell_offset += AMDGPU_NAVI10_DOORBELL64_VPE << 1;
+ break;
+
+ default:
+		drm_file_err(uq_mgr->file, "[Usermode queues] IP %d not supported\n",
+ db_info->queue_type);
+ r = -EINVAL;
+ goto unpin_bo;
+ }
+
+ index = amdgpu_doorbell_index_on_bar(uq_mgr->adev, db_obj->obj,
+ db_info->doorbell_offset, db_size);
+ drm_dbg_driver(adev_to_drm(uq_mgr->adev),
+ "[Usermode queues] doorbell index=%lld\n", index);
+ amdgpu_bo_unreserve(db_obj->obj);
+ return index;
+
+unpin_bo:
+ amdgpu_bo_unpin(db_obj->obj);
+unresv_bo:
+ amdgpu_bo_unreserve(db_obj->obj);
+unref_bo:
+ amdgpu_bo_unref(&db_obj->obj);
+ return r;
+}
+
+static int
+amdgpu_userq_destroy(struct drm_file *filp, int queue_id)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_usermode_queue *queue;
+ int r = 0;
+
+ cancel_delayed_work_sync(&uq_mgr->resume_work);
+ mutex_lock(&uq_mgr->userq_mutex);
+
+ queue = amdgpu_userq_find(uq_mgr, queue_id);
+ if (!queue) {
+ drm_dbg_driver(adev_to_drm(uq_mgr->adev), "Invalid queue id to destroy\n");
+ mutex_unlock(&uq_mgr->userq_mutex);
+ return -EINVAL;
+ }
+ amdgpu_userq_wait_for_last_fence(uq_mgr, queue);
+ r = amdgpu_bo_reserve(queue->db_obj.obj, true);
+ if (!r) {
+ amdgpu_bo_unpin(queue->db_obj.obj);
+ amdgpu_bo_unreserve(queue->db_obj.obj);
+ }
+ amdgpu_bo_unref(&queue->db_obj.obj);
+
+#if defined(CONFIG_DEBUG_FS)
+ debugfs_remove_recursive(queue->debugfs_queue);
+#endif
+ r = amdgpu_userq_unmap_helper(uq_mgr, queue);
+	/* TODO: a userq HW unmap error requires a reset */
+	if (unlikely(r)) {
+		drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a userq that is still mapped in HW\n");
+ queue->state = AMDGPU_USERQ_STATE_HUNG;
+ }
+ amdgpu_userq_cleanup(uq_mgr, queue, queue_id);
+ mutex_unlock(&uq_mgr->userq_mutex);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ return r;
+}
+
+static int amdgpu_userq_priority_permit(struct drm_file *filp,
+ int priority)
+{
+ if (priority < AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_HIGH)
+ return 0;
+
+ if (capable(CAP_SYS_NICE))
+ return 0;
+
+ if (drm_is_current_master(filp))
+ return 0;
+
+ return -EACCES;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
+{
+ struct amdgpu_usermode_queue *queue = m->private;
+ struct amdgpu_bo *bo;
+ int r;
+
+ if (!queue || !queue->mqd.obj)
+ return -EINVAL;
+
+ bo = amdgpu_bo_ref(queue->mqd.obj);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ amdgpu_bo_unref(&bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "queue_type: %d\n", queue->queue_type);
+ seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
+
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+}
+
+static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_mqd_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_mqd_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_mqd_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
+static int
+amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_device *adev = uq_mgr->adev;
+ const struct amdgpu_userq_funcs *uq_funcs;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_db_info db_info;
+ char *queue_name;
+ bool skip_map_queue;
+ uint64_t index;
+ int qid, r = 0;
+ int priority =
+ (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
+ AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
+
+ r = amdgpu_userq_priority_permit(filp, priority);
+ if (r)
+ return r;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ drm_file_err(uq_mgr->file, "pm_runtime_get_sync() failed for userqueue create\n");
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ /*
+ * There could be a situation that we are creating a new queue while
+ * the other queues under this UQ_mgr are suspended. So if there is any
+ * resume work pending, wait for it to get done.
+ *
+ * This will also make sure we have a valid eviction fence ready to be used.
+ */
+ mutex_lock(&adev->userq_mutex);
+ amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+ uq_funcs = adev->userq_funcs[args->in.ip_type];
+ if (!uq_funcs) {
+ drm_file_err(uq_mgr->file, "Usermode queue is not supported for this IP (%u)\n",
+ args->in.ip_type);
+ r = -EINVAL;
+ goto unlock;
+ }
+
+ queue = kzalloc(sizeof(struct amdgpu_usermode_queue), GFP_KERNEL);
+ if (!queue) {
+ drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n");
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+	/* Validate the userq virtual addresses */
+ if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) ||
+ amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
+ amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
+ r = -EINVAL;
+ kfree(queue);
+ goto unlock;
+ }
+ queue->doorbell_handle = args->in.doorbell_handle;
+ queue->queue_type = args->in.ip_type;
+ queue->vm = &fpriv->vm;
+ queue->priority = priority;
+
+ db_info.queue_type = queue->queue_type;
+ db_info.doorbell_handle = queue->doorbell_handle;
+ db_info.db_obj = &queue->db_obj;
+ db_info.doorbell_offset = args->in.doorbell_offset;
+
+ /* Convert relative doorbell offset into absolute doorbell index */
+ index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
+ if (index == (uint64_t)-EINVAL) {
+ drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
+ kfree(queue);
+ r = -EINVAL;
+ goto unlock;
+ }
+
+ queue->doorbell_index = index;
+ xa_init_flags(&queue->fence_drv_xa, XA_FLAGS_ALLOC);
+ r = amdgpu_userq_fence_driver_alloc(adev, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
+ goto unlock;
+ }
+
+ r = uq_funcs->mqd_create(uq_mgr, &args->in, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to create Queue\n");
+ amdgpu_userq_fence_driver_free(queue);
+ kfree(queue);
+ goto unlock;
+ }
+
+ qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
+ if (qid < 0) {
+ drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
+ amdgpu_userq_fence_driver_free(queue);
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ kfree(queue);
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+ /* don't map the queue if scheduling is halted */
+ if (adev->userq_halt_for_enforce_isolation &&
+ ((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)))
+ skip_map_queue = true;
+ else
+ skip_map_queue = false;
+ if (!skip_map_queue) {
+ r = amdgpu_userq_map_helper(uq_mgr, queue);
+ if (r) {
+ drm_file_err(uq_mgr->file, "Failed to map Queue\n");
+ idr_remove(&uq_mgr->userq_idr, qid);
+ amdgpu_userq_fence_driver_free(queue);
+ uq_funcs->mqd_destroy(uq_mgr, queue);
+ kfree(queue);
+ goto unlock;
+ }
+ }
+
+ queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
+ if (!queue_name) {
+ r = -ENOMEM;
+ goto unlock;
+ }
+
+#if defined(CONFIG_DEBUG_FS)
+ /* Queue dentry per client to hold MQD information */
+ queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client);
+ debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
+#endif
+ kfree(queue_name);
+
+ args->out.queue_id = qid;
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+ mutex_unlock(&adev->userq_mutex);
+
+ return r;
+}
+
+static int amdgpu_userq_input_args_validate(struct drm_device *dev,
+ union drm_amdgpu_userq *args,
+ struct drm_file *filp)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ switch (args->in.op) {
+ case AMDGPU_USERQ_OP_CREATE:
+ if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK |
+ AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE))
+ return -EINVAL;
+		/* Usermode queues are only supported for GFX, SDMA and compute IPs as of now */
+ if (args->in.ip_type != AMDGPU_HW_IP_GFX &&
+ args->in.ip_type != AMDGPU_HW_IP_DMA &&
+ args->in.ip_type != AMDGPU_HW_IP_COMPUTE) {
+ drm_file_err(filp, "Usermode queue doesn't support IP type %u\n",
+ args->in.ip_type);
+ return -EINVAL;
+ }
+
+ if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) &&
+ (args->in.ip_type != AMDGPU_HW_IP_GFX) &&
+ (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) &&
+ !amdgpu_is_tmz(adev)) {
+ drm_file_err(filp, "Secure only supported on GFX/Compute queues\n");
+ return -EINVAL;
+ }
+
+ if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET ||
+ args->in.queue_va == 0 ||
+ args->in.queue_size == 0) {
+			drm_file_err(filp, "invalid userq queue va or size\n");
+ return -EINVAL;
+ }
+ if (!args->in.wptr_va || !args->in.rptr_va) {
+			drm_file_err(filp, "invalid userq queue rptr or wptr\n");
+ return -EINVAL;
+ }
+ break;
+ case AMDGPU_USERQ_OP_FREE:
+ if (args->in.ip_type ||
+ args->in.doorbell_handle ||
+ args->in.doorbell_offset ||
+ args->in.flags ||
+ args->in.queue_va ||
+ args->in.queue_size ||
+ args->in.rptr_va ||
+ args->in.wptr_va ||
+ args->in.mqd ||
+ args->in.mqd_size)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ union drm_amdgpu_userq *args = data;
+ int r;
+
+ if (amdgpu_userq_input_args_validate(dev, args, filp) < 0)
+ return -EINVAL;
+
+ switch (args->in.op) {
+ case AMDGPU_USERQ_OP_CREATE:
+ r = amdgpu_userq_create(filp, args);
+ if (r)
+ drm_file_err(filp, "Failed to create usermode queue\n");
+ break;
+
+ case AMDGPU_USERQ_OP_FREE:
+ r = amdgpu_userq_destroy(filp, args->in.queue_id);
+ if (r)
+ drm_file_err(filp, "Failed to destroy usermode queue\n");
+ break;
+
+ default:
+ drm_dbg_driver(dev, "Invalid user queue op specified: %d\n", args->in.op);
+ return -EINVAL;
+ }
+
+ return r;
+}
+
+static int
+amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0, r;
+
+ /* Resume all the queues for this process */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_restore_helper(uq_mgr, queue);
+ if (r)
+ ret = r;
+ }
+
+ if (ret)
+ drm_file_err(uq_mgr->file, "Failed to map all the queues\n");
+ return ret;
+}
+
+static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+/* Handle all BOs on the invalidated list, validate them and update the PTs */
+static int
+amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
+ struct amdgpu_vm *vm)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
+ int ret;
+
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->invalidated)) {
+ bo_va = list_first_entry(&vm->invalidated,
+ struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_va->base.bo;
+ ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
+ if (unlikely(ret))
+ return ret;
+
+ amdgpu_bo_placement_from_domain(bo, bo->allowed_domains);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (ret)
+ return ret;
+
+ /* This moves the bo_va to the done list */
+ ret = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (ret)
+ return ret;
+
+ spin_lock(&vm->status_lock);
+ }
+ spin_unlock(&vm->status_lock);
+
+ return 0;
+}
+
+/* Make sure the whole VM is ready to be used */
+static int
+amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct amdgpu_device *adev = uq_mgr->adev;
+ struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_bo_va *bo_va;
+ struct drm_exec exec;
+ int ret;
+
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
+ drm_exec_until_all_locked(&exec) {
+ ret = amdgpu_vm_lock_pd(vm, &exec, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
+
+ ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
+
+ /* This validates PDs, PTs and per VM BOs */
+ ret = amdgpu_vm_validate(adev, vm, NULL,
+ amdgpu_userq_validate_vm,
+ NULL);
+ if (unlikely(ret))
+ goto unlock_all;
+
+ /* This locks and validates the remaining evicted BOs */
+ ret = amdgpu_userq_bo_validate(adev, &exec, vm);
+ drm_exec_retry_on_contention(&exec);
+ if (unlikely(ret))
+ goto unlock_all;
+ }
+
+ ret = amdgpu_vm_handle_moved(adev, vm, NULL);
+ if (ret)
+ goto unlock_all;
+
+ ret = amdgpu_vm_update_pdes(adev, vm, false);
+ if (ret)
+ goto unlock_all;
+
+ /*
+ * We need to wait for all VM updates to finish before restarting the
+ * queues. Using the done list like that is now ok since everything is
+ * locked in place.
+ */
+ list_for_each_entry(bo_va, &vm->done, base.vm_status)
+ dma_fence_wait(bo_va->last_pt_update, false);
+ dma_fence_wait(vm->last_update, false);
+
+ ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
+ if (ret)
+ drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
+
+unlock_all:
+ drm_exec_fini(&exec);
+ return ret;
+}
+
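+/*
+ * Delayed-work handler that revalidates the process VM and restores
+ * (remaps) all preempted queues after an eviction.
+ */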
+static void amdgpu_userq_restore_worker(struct work_struct *work)
+{
+ struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ int ret;
+
+ flush_delayed_work(&fpriv->evf_mgr.suspend_work);
+
+ mutex_lock(&uq_mgr->userq_mutex);
+
+ ret = amdgpu_userq_vm_validate(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n");
+ goto unlock;
+ }
+
+ ret = amdgpu_userq_restore_all(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to restore all queues\n");
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&uq_mgr->userq_mutex);
+}
+
+static int
+amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id;
+ int ret = 0, r;
+
+	/* Try to preempt all the queues in this process ctx */
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ r = amdgpu_userq_preempt_helper(uq_mgr, queue);
+ if (r)
+ ret = r;
+ }
+
+ if (ret)
+		drm_file_err(uq_mgr->file, "Couldn't preempt all the queues\n");
+ return ret;
+}
+
+static int
+amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
+{
+ struct amdgpu_usermode_queue *queue;
+ int queue_id, ret;
+
+ idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ struct dma_fence *f = queue->last_fence;
+
+ if (!f || dma_fence_is_signaled(f))
+ continue;
+ ret = dma_fence_wait_timeout(f, true, msecs_to_jiffies(100));
+ if (ret <= 0) {
+ drm_file_err(uq_mgr->file, "Timed out waiting for fence=%llu:%llu\n",
+ f->context, f->seqno);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
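+/*
+ * Eviction entry point: wait for outstanding user queue fences, preempt all
+ * queues, signal the current eviction fence and, unless the file is being
+ * closed, schedule the resume work to bring the queues back.
+ */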
+void
+amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence)
+{
+ int ret;
+ struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
+
+ /* Wait for any pending userqueue fence work to finish */
+ ret = amdgpu_userq_wait_for_signal(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Not evicting userqueue, timeout waiting for work\n");
+ return;
+ }
+
+ ret = amdgpu_userq_evict_all(uq_mgr);
+ if (ret) {
+ drm_file_err(uq_mgr->file, "Failed to evict userqueue\n");
+ return;
+ }
+
+ /* Signal current eviction fence */
+ amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
+
+ if (evf_mgr->fd_closing) {
+ cancel_delayed_work_sync(&uq_mgr->resume_work);
+ return;
+ }
+
+ /* Schedule a resume work */
+ schedule_delayed_work(&uq_mgr->resume_work, 0);
+}
+
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+ struct amdgpu_device *adev)
+{
+ mutex_init(&userq_mgr->userq_mutex);
+ idr_init_base(&userq_mgr->userq_idr, 1);
+ userq_mgr->adev = adev;
+ userq_mgr->file = file_priv;
+
+ mutex_lock(&adev->userq_mutex);
+ list_add(&userq_mgr->list, &adev->userq_mgr_list);
+ mutex_unlock(&adev->userq_mutex);
+
+ INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
+ return 0;
+}
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
+{
+ struct amdgpu_device *adev = userq_mgr->adev;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ uint32_t queue_id;
+
+ cancel_delayed_work_sync(&userq_mgr->resume_work);
+
+ mutex_lock(&adev->userq_mutex);
+ mutex_lock(&userq_mgr->userq_mutex);
+ idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
+ amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
+ amdgpu_userq_unmap_helper(userq_mgr, queue);
+ amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
+ }
+
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ if (uqm == userq_mgr) {
+ list_del(&uqm->list);
+ break;
+ }
+ }
+ idr_destroy(&userq_mgr->userq_idr);
+ mutex_unlock(&userq_mgr->userq_mutex);
+ mutex_unlock(&adev->userq_mutex);
+ mutex_destroy(&userq_mgr->userq_mutex);
+}
+
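+/*
+ * Device-wide suspend/resume helpers: for S0ix the queues are only
+ * preempted and restored, otherwise they are fully unmapped and mapped
+ * again.
+ */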
+int amdgpu_userq_suspend(struct amdgpu_device *adev)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ if (!ip_mask)
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ cancel_delayed_work_sync(&uqm->resume_work);
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (adev->in_s0ix)
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ else
+ r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_resume(struct amdgpu_device *adev)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ if (!ip_mask)
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (adev->in_s0ix)
+ r = amdgpu_userq_restore_helper(uqm, queue);
+ else
+ r = amdgpu_userq_map_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
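+/*
+ * Enforce-isolation support: preempt all GFX/compute user queues on the
+ * given partition (xcp_id == idx) while isolation is enforced, and restore
+ * them when it ends. New GFX/compute queues created while halted are left
+ * unmapped (see amdgpu_userq_create()).
+ */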
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+ /* only need to stop gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ if (adev->userq_halt_for_enforce_isolation)
+ dev_warn(adev->dev, "userq scheduling already stopped!\n");
+ adev->userq_halt_for_enforce_isolation = true;
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ cancel_delayed_work_sync(&uqm->resume_work);
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
+
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx)
+{
+ u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_mgr *uqm, *tmp;
+ int queue_id;
+ int ret = 0, r;
+
+	/* only need to restart gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
+ mutex_lock(&adev->userq_mutex);
+ if (!adev->userq_halt_for_enforce_isolation)
+ dev_warn(adev->dev, "userq scheduling already started!\n");
+ adev->userq_halt_for_enforce_isolation = false;
+ list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ mutex_lock(&uqm->userq_mutex);
+ idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ r = amdgpu_userq_restore_helper(uqm, queue);
+ if (r)
+ ret = r;
+ }
+ }
+ mutex_unlock(&uqm->userq_mutex);
+ }
+ mutex_unlock(&adev->userq_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
new file mode 100644
index 000000000000..c027dd916672
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_USERQ_H_
+#define AMDGPU_USERQ_H_
+#include "amdgpu_eviction_fence.h"
+
+#define AMDGPU_MAX_USERQ_COUNT 512
+
+#define to_ev_fence(f) container_of(f, struct amdgpu_eviction_fence, base)
+#define uq_mgr_to_fpriv(u) container_of(u, struct amdgpu_fpriv, userq_mgr)
+#define work_to_uq_mgr(w, name) container_of(w, struct amdgpu_userq_mgr, name)
+
+enum amdgpu_userq_state {
+ AMDGPU_USERQ_STATE_UNMAPPED = 0,
+ AMDGPU_USERQ_STATE_MAPPED,
+ AMDGPU_USERQ_STATE_PREEMPTED,
+ AMDGPU_USERQ_STATE_HUNG,
+};
+
+struct amdgpu_mqd_prop;
+
+struct amdgpu_userq_obj {
+ void *cpu_ptr;
+ uint64_t gpu_addr;
+ struct amdgpu_bo *obj;
+};
+
+struct amdgpu_usermode_queue {
+ int queue_type;
+ enum amdgpu_userq_state state;
+ uint64_t doorbell_handle;
+ uint64_t doorbell_index;
+ uint64_t flags;
+ struct amdgpu_mqd_prop *userq_prop;
+ struct amdgpu_userq_mgr *userq_mgr;
+ struct amdgpu_vm *vm;
+ struct amdgpu_userq_obj mqd;
+ struct amdgpu_userq_obj db_obj;
+ struct amdgpu_userq_obj fw_obj;
+ struct amdgpu_userq_obj wptr_obj;
+ struct xarray fence_drv_xa;
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct dma_fence *last_fence;
+ u32 xcp_id;
+ int priority;
+ struct dentry *debugfs_queue;
+};
+
+struct amdgpu_userq_funcs {
+ int (*mqd_create)(struct amdgpu_userq_mgr *uq_mgr,
+ struct drm_amdgpu_userq_in *args,
+ struct amdgpu_usermode_queue *queue);
+ void (*mqd_destroy)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *uq);
+ int (*unmap)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*map)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*preempt)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*restore)(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_usermode_queue *queue);
+ int (*detect_and_reset)(struct amdgpu_device *adev,
+ int queue_type);
+};
+
+/* Usermode queues for gfx */
+struct amdgpu_userq_mgr {
+ struct idr userq_idr;
+ struct mutex userq_mutex;
+ struct amdgpu_device *adev;
+ struct delayed_work resume_work;
+ struct list_head list;
+ struct drm_file *file;
+};
+
+struct amdgpu_db_info {
+ uint64_t doorbell_handle;
+ uint32_t queue_type;
+ uint32_t doorbell_offset;
+ struct amdgpu_userq_obj *db_obj;
+};
+
+int amdgpu_userq_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+
+int amdgpu_userq_mgr_init(struct amdgpu_userq_mgr *userq_mgr, struct drm_file *file_priv,
+ struct amdgpu_device *adev);
+
+void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr);
+
+int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj,
+ int size);
+
+void amdgpu_userq_destroy_object(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_userq_obj *userq_obj);
+
+void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_eviction_fence *ev_fence);
+
+void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr,
+ struct amdgpu_eviction_fence_mgr *evf_mgr);
+
+uint64_t amdgpu_userq_get_doorbell_index(struct amdgpu_userq_mgr *uq_mgr,
+ struct amdgpu_db_info *db_info,
+ struct drm_file *filp);
+
+u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev);
+
+int amdgpu_userq_suspend(struct amdgpu_device *adev);
+int amdgpu_userq_resume(struct amdgpu_device *adev);
+
+int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx);
+int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
+ u32 idx);
+
+int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr,
+ u64 expected_size);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
new file mode 100644
index 000000000000..761bad98da3e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -0,0 +1,1009 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/dma-fence-unwrap.h>
+
+#include <drm/drm_exec.h>
+#include <drm/drm_syncobj.h>
+
+#include "amdgpu.h"
+#include "amdgpu_userq_fence.h"
+
+static const struct dma_fence_ops amdgpu_userq_fence_ops;
+static struct kmem_cache *amdgpu_userq_fence_slab;
+
+int amdgpu_userq_fence_slab_init(void)
+{
+ amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
+ sizeof(struct amdgpu_userq_fence),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL);
+ if (!amdgpu_userq_fence_slab)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void amdgpu_userq_fence_slab_fini(void)
+{
+ rcu_barrier();
+ kmem_cache_destroy(amdgpu_userq_fence_slab);
+}
+
+static inline struct amdgpu_userq_fence *to_amdgpu_userq_fence(struct dma_fence *f)
+{
+ if (!f || f->ops != &amdgpu_userq_fence_ops)
+ return NULL;
+
+ return container_of(f, struct amdgpu_userq_fence, base);
+}
+
+static u64 amdgpu_userq_fence_read(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ return le64_to_cpu(*fence_drv->cpu_addr);
+}
+
+static void
+amdgpu_userq_fence_write(struct amdgpu_userq_fence_driver *fence_drv,
+ u64 seq)
+{
+ if (fence_drv->cpu_addr)
+ *fence_drv->cpu_addr = cpu_to_le64(seq);
+}
+
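+/*
+ * Allocate the per-queue fence driver: a seq64 slot provides the 64-bit
+ * fence RPTR memory, and the driver is registered in adev->userq_xa keyed
+ * by the queue's doorbell index.
+ */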
+int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *userq)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ unsigned long flags;
+ int r;
+
+ fence_drv = kzalloc(sizeof(*fence_drv), GFP_KERNEL);
+ if (!fence_drv)
+ return -ENOMEM;
+
+ /* Acquire seq64 memory */
+ r = amdgpu_seq64_alloc(adev, &fence_drv->va, &fence_drv->gpu_addr,
+ &fence_drv->cpu_addr);
+ if (r)
+ goto free_fence_drv;
+
+ memset(fence_drv->cpu_addr, 0, sizeof(u64));
+
+ kref_init(&fence_drv->refcount);
+ INIT_LIST_HEAD(&fence_drv->fences);
+ spin_lock_init(&fence_drv->fence_list_lock);
+
+ fence_drv->adev = adev;
+ fence_drv->context = dma_fence_context_alloc(1);
+ get_task_comm(fence_drv->timeline_name, current);
+
+ xa_lock_irqsave(&adev->userq_xa, flags);
+ r = xa_err(__xa_store(&adev->userq_xa, userq->doorbell_index,
+ fence_drv, GFP_KERNEL));
+ xa_unlock_irqrestore(&adev->userq_xa, flags);
+ if (r)
+ goto free_seq64;
+
+ userq->fence_drv = fence_drv;
+
+ return 0;
+
+free_seq64:
+ amdgpu_seq64_free(adev, fence_drv->va);
+free_fence_drv:
+ kfree(fence_drv);
+
+ return r;
+}
+
+static void amdgpu_userq_walk_and_drop_fence_drv(struct xarray *xa)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ unsigned long index;
+
+ if (xa_empty(xa))
+ return;
+
+ xa_lock(xa);
+ xa_for_each(xa, index, fence_drv) {
+ __xa_erase(xa, index);
+ amdgpu_userq_fence_driver_put(fence_drv);
+ }
+
+ xa_unlock(xa);
+}
+
+void
+amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq)
+{
+ amdgpu_userq_walk_and_drop_fence_drv(&userq->fence_drv_xa);
+ xa_destroy(&userq->fence_drv_xa);
+ /* Drop the fence_drv reference held by user queue */
+ amdgpu_userq_fence_driver_put(userq->fence_drv);
+}
+
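+/*
+ * Read the current RPTR value from seq64 memory and signal every pending
+ * fence whose seqno has been reached, dropping the references collected at
+ * fence creation time.
+ */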
+void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ struct amdgpu_userq_fence *userq_fence, *tmp;
+ struct dma_fence *fence;
+ u64 rptr;
+ int i;
+
+ if (!fence_drv)
+ return;
+
+ rptr = amdgpu_userq_fence_read(fence_drv);
+
+ spin_lock(&fence_drv->fence_list_lock);
+ list_for_each_entry_safe(userq_fence, tmp, &fence_drv->fences, link) {
+ fence = &userq_fence->base;
+
+ if (rptr < fence->seqno)
+ break;
+
+ dma_fence_signal(fence);
+
+ for (i = 0; i < userq_fence->fence_drv_array_count; i++)
+ amdgpu_userq_fence_driver_put(userq_fence->fence_drv_array[i]);
+
+ list_del(&userq_fence->link);
+ dma_fence_put(fence);
+ }
+ spin_unlock(&fence_drv->fence_list_lock);
+}
+
+void amdgpu_userq_fence_driver_destroy(struct kref *ref)
+{
+ struct amdgpu_userq_fence_driver *fence_drv = container_of(ref,
+ struct amdgpu_userq_fence_driver,
+ refcount);
+ struct amdgpu_userq_fence_driver *xa_fence_drv;
+ struct amdgpu_device *adev = fence_drv->adev;
+ struct amdgpu_userq_fence *fence, *tmp;
+ struct xarray *xa = &adev->userq_xa;
+ unsigned long index, flags;
+ struct dma_fence *f;
+
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+ list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {
+ f = &fence->base;
+
+ if (!dma_fence_is_signaled(f)) {
+ dma_fence_set_error(f, -ECANCELED);
+ dma_fence_signal(f);
+ }
+
+ list_del(&fence->link);
+ dma_fence_put(f);
+ }
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+ xa_lock_irqsave(xa, flags);
+ xa_for_each(xa, index, xa_fence_drv)
+ if (xa_fence_drv == fence_drv)
+ __xa_erase(xa, index);
+ xa_unlock_irqrestore(xa, flags);
+
+ /* Free seq64 memory */
+ amdgpu_seq64_free(adev, fence_drv->va);
+ kfree(fence_drv);
+}
+
+void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ kref_get(&fence_drv->refcount);
+}
+
+void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
+{
+ kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
+}
+
+static int amdgpu_userq_fence_alloc(struct amdgpu_userq_fence **userq_fence)
+{
+ *userq_fence = kmem_cache_alloc(amdgpu_userq_fence_slab, GFP_ATOMIC);
+ return *userq_fence ? 0 : -ENOMEM;
+}
+
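+/*
+ * Initialize a new 64-bit user queue fence at the given seqno (WPTR), take
+ * a snapshot of the fence drivers stashed in the queue's fence_drv_xa
+ * (their references are dropped once this fence signals) and add the fence
+ * to the driver's pending list unless the hardware has already passed it.
+ */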
+static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
+ struct amdgpu_userq_fence *userq_fence,
+ u64 seq, struct dma_fence **f)
+{
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct dma_fence *fence;
+ unsigned long flags;
+
+ fence_drv = userq->fence_drv;
+ if (!fence_drv)
+ return -EINVAL;
+
+ spin_lock_init(&userq_fence->lock);
+ INIT_LIST_HEAD(&userq_fence->link);
+ fence = &userq_fence->base;
+ userq_fence->fence_drv = fence_drv;
+
+ dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
+ fence_drv->context, seq);
+
+ amdgpu_userq_fence_driver_get(fence_drv);
+ dma_fence_get(fence);
+
+ if (!xa_empty(&userq->fence_drv_xa)) {
+ struct amdgpu_userq_fence_driver *stored_fence_drv;
+ unsigned long index, count = 0;
+ int i = 0;
+
+ xa_lock(&userq->fence_drv_xa);
+ xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv)
+ count++;
+
+ userq_fence->fence_drv_array =
+ kvmalloc_array(count,
+ sizeof(struct amdgpu_userq_fence_driver *),
+ GFP_ATOMIC);
+
+ if (userq_fence->fence_drv_array) {
+ xa_for_each(&userq->fence_drv_xa, index, stored_fence_drv) {
+ userq_fence->fence_drv_array[i] = stored_fence_drv;
+ __xa_erase(&userq->fence_drv_xa, index);
+ i++;
+ }
+ }
+
+ userq_fence->fence_drv_array_count = i;
+ xa_unlock(&userq->fence_drv_xa);
+ } else {
+ userq_fence->fence_drv_array = NULL;
+ userq_fence->fence_drv_array_count = 0;
+ }
+
+ /* Check if hardware has already processed the job */
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+ if (!dma_fence_is_signaled(fence))
+ list_add_tail(&userq_fence->link, &fence_drv->fences);
+ else
+ dma_fence_put(fence);
+
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+
+ *f = fence;
+
+ return 0;
+}
+
+static const char *amdgpu_userq_fence_get_driver_name(struct dma_fence *f)
+{
+ return "amdgpu_userq_fence";
+}
+
+static const char *amdgpu_userq_fence_get_timeline_name(struct dma_fence *f)
+{
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+
+ return fence->fence_drv->timeline_name;
+}
+
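+/* A fence is signaled once the RPTR in seq64 memory reaches its seqno (WPTR). */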
+static bool amdgpu_userq_fence_signaled(struct dma_fence *f)
+{
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ u64 rptr, wptr;
+
+ rptr = amdgpu_userq_fence_read(fence_drv);
+ wptr = fence->base.seqno;
+
+ if (rptr >= wptr)
+ return true;
+
+ return false;
+}
+
+static void amdgpu_userq_fence_free(struct rcu_head *rcu)
+{
+ struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
+ struct amdgpu_userq_fence *userq_fence = to_amdgpu_userq_fence(fence);
+ struct amdgpu_userq_fence_driver *fence_drv = userq_fence->fence_drv;
+
+ /* Release the fence driver reference */
+ amdgpu_userq_fence_driver_put(fence_drv);
+
+ kvfree(userq_fence->fence_drv_array);
+ kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+}
+
+static void amdgpu_userq_fence_release(struct dma_fence *f)
+{
+ call_rcu(&f->rcu, amdgpu_userq_fence_free);
+}
+
+static const struct dma_fence_ops amdgpu_userq_fence_ops = {
+ .get_driver_name = amdgpu_userq_fence_get_driver_name,
+ .get_timeline_name = amdgpu_userq_fence_get_timeline_name,
+ .signaled = amdgpu_userq_fence_signaled,
+ .release = amdgpu_userq_fence_release,
+};
+
+/**
+ * amdgpu_userq_fence_read_wptr - Read the userq wptr value
+ *
+ * @queue: user mode queue structure pointer
+ * @wptr: write pointer value
+ *
+ * Read the wptr value from the BO backing the userq's wptr address. The
+ * userq signal IOCTL creates a dma_fence for the shared buffers that is
+ * considered signaled once the RPTR value written to seq64 memory is >=
+ * this WPTR.
+ *
+ * Returns 0 on success (with the value stored in @wptr), error on failure.
+ */
+static int amdgpu_userq_fence_read_wptr(struct amdgpu_usermode_queue *queue,
+ u64 *wptr)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ struct amdgpu_bo *bo;
+ u64 addr, *ptr;
+ int r;
+
+ r = amdgpu_bo_reserve(queue->vm->root.bo, false);
+ if (r)
+ return r;
+
+ addr = queue->userq_prop->wptr_gpu_addr;
+ addr &= AMDGPU_GMC_HOLE_MASK;
+
+ mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
+ if (!mapping) {
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ DRM_ERROR("Failed to lookup amdgpu_bo_va_mapping\n");
+ return -EINVAL;
+ }
+
+ bo = amdgpu_bo_ref(mapping->bo_va->base.bo);
+ amdgpu_bo_unreserve(queue->vm->root.bo);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ DRM_ERROR("Failed to reserve userqueue wptr bo");
+ return r;
+ }
+
+ r = amdgpu_bo_kmap(bo, (void **)&ptr);
+ if (r) {
+ DRM_ERROR("Failed mapping the userqueue wptr bo");
+ goto map_error;
+ }
+
+ *wptr = le64_to_cpu(*ptr);
+
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+
+map_error:
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return r;
+}
+
+static void amdgpu_userq_fence_cleanup(struct dma_fence *fence)
+{
+ dma_fence_put(fence);
+}
+
+static void
+amdgpu_userq_fence_driver_set_error(struct amdgpu_userq_fence *fence,
+ int error)
+{
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ unsigned long flags;
+ struct dma_fence *f;
+
+ spin_lock_irqsave(&fence_drv->fence_list_lock, flags);
+
+ f = rcu_dereference_protected(&fence->base,
+ lockdep_is_held(&fence_drv->fence_list_lock));
+ if (f && !dma_fence_is_signaled_locked(f))
+ dma_fence_set_error(f, error);
+ spin_unlock_irqrestore(&fence_drv->fence_list_lock, flags);
+}
+
+void
+amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq)
+{
+ struct dma_fence *f = userq->last_fence;
+
+ if (f) {
+ struct amdgpu_userq_fence *fence = to_amdgpu_userq_fence(f);
+ struct amdgpu_userq_fence_driver *fence_drv = fence->fence_drv;
+ u64 wptr = fence->base.seqno;
+
+ amdgpu_userq_fence_driver_set_error(fence, -ECANCELED);
+ amdgpu_userq_fence_write(fence_drv, wptr);
+ amdgpu_userq_fence_driver_process(fence_drv);
+
+ }
+}
+
+int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+ struct drm_amdgpu_userq_signal *args = data;
+ struct drm_gem_object **gobj_write = NULL;
+ struct drm_gem_object **gobj_read = NULL;
+ struct amdgpu_usermode_queue *queue;
+ struct amdgpu_userq_fence *userq_fence;
+ struct drm_syncobj **syncobj = NULL;
+ u32 *bo_handles_write, num_write_bo_handles;
+ u32 *syncobj_handles, num_syncobj_handles;
+ u32 *bo_handles_read, num_read_bo_handles;
+ int r, i, entry, rentry, wentry;
+ struct dma_fence *fence;
+ struct drm_exec exec;
+ u64 wptr;
+
+ num_syncobj_handles = args->num_syncobj_handles;
+ syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
+ size_mul(sizeof(u32), num_syncobj_handles));
+ if (IS_ERR(syncobj_handles))
+ return PTR_ERR(syncobj_handles);
+
+ /* Array of pointers to the looked up syncobjs */
+ syncobj = kmalloc_array(num_syncobj_handles, sizeof(*syncobj), GFP_KERNEL);
+ if (!syncobj) {
+ r = -ENOMEM;
+ goto free_syncobj_handles;
+ }
+
+ for (entry = 0; entry < num_syncobj_handles; entry++) {
+ syncobj[entry] = drm_syncobj_find(filp, syncobj_handles[entry]);
+ if (!syncobj[entry]) {
+ r = -ENOENT;
+ goto free_syncobj;
+ }
+ }
+
+ num_read_bo_handles = args->num_bo_read_handles;
+ bo_handles_read = memdup_user(u64_to_user_ptr(args->bo_read_handles),
+ sizeof(u32) * num_read_bo_handles);
+ if (IS_ERR(bo_handles_read)) {
+ r = PTR_ERR(bo_handles_read);
+ goto free_syncobj;
+ }
+
+ /* Array of pointers to the GEM read objects */
+ gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
+ if (!gobj_read) {
+ r = -ENOMEM;
+ goto free_bo_handles_read;
+ }
+
+ for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
+ gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
+ if (!gobj_read[rentry]) {
+ r = -ENOENT;
+ goto put_gobj_read;
+ }
+ }
+
+ num_write_bo_handles = args->num_bo_write_handles;
+ bo_handles_write = memdup_user(u64_to_user_ptr(args->bo_write_handles),
+ sizeof(u32) * num_write_bo_handles);
+ if (IS_ERR(bo_handles_write)) {
+ r = PTR_ERR(bo_handles_write);
+ goto put_gobj_read;
+ }
+
+ /* Array of pointers to the GEM write objects */
+ gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
+ if (!gobj_write) {
+ r = -ENOMEM;
+ goto free_bo_handles_write;
+ }
+
+ for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
+ gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
+ if (!gobj_write[wentry]) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+ }
+
+ /* Retrieve the user queue */
+ queue = idr_find(&userq_mgr->userq_idr, args->queue_id);
+ if (!queue) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+
+ r = amdgpu_userq_fence_read_wptr(queue, &wptr);
+ if (r)
+ goto put_gobj_write;
+
+ r = amdgpu_userq_fence_alloc(&userq_fence);
+ if (r)
+ goto put_gobj_write;
+
+ /* Being here means the UQ is active; make sure the eviction fence is valid */
+ amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
+
+ /* Create a new fence */
+ r = amdgpu_userq_fence_create(queue, userq_fence, wptr, &fence);
+ if (r) {
+ mutex_unlock(&userq_mgr->userq_mutex);
+ kmem_cache_free(amdgpu_userq_fence_slab, userq_fence);
+ goto put_gobj_write;
+ }
+
+ dma_fence_put(queue->last_fence);
+ queue->last_fence = dma_fence_get(fence);
+ mutex_unlock(&userq_mgr->userq_mutex);
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
+ (num_read_bo_handles + num_write_bo_handles));
+
+ /* Lock all BOs with retry handling */
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ amdgpu_userq_fence_cleanup(fence);
+ goto exec_fini;
+ }
+
+ r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ amdgpu_userq_fence_cleanup(fence);
+ goto exec_fini;
+ }
+ }
+
+ for (i = 0; i < num_read_bo_handles; i++) {
+ if (!gobj_read || !gobj_read[i]->resv)
+ continue;
+
+ dma_resv_add_fence(gobj_read[i]->resv, fence,
+ DMA_RESV_USAGE_READ);
+ }
+
+ for (i = 0; i < num_write_bo_handles; i++) {
+ if (!gobj_write || !gobj_write[i]->resv)
+ continue;
+
+ dma_resv_add_fence(gobj_write[i]->resv, fence,
+ DMA_RESV_USAGE_WRITE);
+ }
+
+ /* Add the created fence to syncobj/BO's */
+ for (i = 0; i < num_syncobj_handles; i++)
+ drm_syncobj_replace_fence(syncobj[i], fence);
+
+ /* drop the reference acquired in fence creation function */
+ dma_fence_put(fence);
+
+exec_fini:
+ drm_exec_fini(&exec);
+put_gobj_write:
+ while (wentry-- > 0)
+ drm_gem_object_put(gobj_write[wentry]);
+ kfree(gobj_write);
+free_bo_handles_write:
+ kfree(bo_handles_write);
+put_gobj_read:
+ while (rentry-- > 0)
+ drm_gem_object_put(gobj_read[rentry]);
+ kfree(gobj_read);
+free_bo_handles_read:
+ kfree(bo_handles_read);
+free_syncobj:
+ while (entry-- > 0)
+ if (syncobj[entry])
+ drm_syncobj_put(syncobj[entry]);
+ kfree(syncobj);
+free_syncobj_handles:
+ kfree(syncobj_handles);
+
+ return r;
+}
+
+int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ u32 *syncobj_handles, *timeline_points, *timeline_handles, *bo_handles_read, *bo_handles_write;
+ u32 num_syncobj, num_read_bo_handles, num_write_bo_handles;
+ struct drm_amdgpu_userq_fence_info *fence_info = NULL;
+ struct drm_amdgpu_userq_wait *wait_info = data;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_userq_mgr *userq_mgr = &fpriv->userq_mgr;
+ struct amdgpu_usermode_queue *waitq;
+ struct drm_gem_object **gobj_write;
+ struct drm_gem_object **gobj_read;
+ struct dma_fence **fences = NULL;
+ u16 num_points, num_fences = 0;
+ int r, i, rentry, wentry, cnt;
+ struct drm_exec exec;
+
+ num_read_bo_handles = wait_info->num_bo_read_handles;
+ bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
+ size_mul(sizeof(u32), num_read_bo_handles));
+ if (IS_ERR(bo_handles_read))
+ return PTR_ERR(bo_handles_read);
+
+ num_write_bo_handles = wait_info->num_bo_write_handles;
+ bo_handles_write = memdup_user(u64_to_user_ptr(wait_info->bo_write_handles),
+ size_mul(sizeof(u32), num_write_bo_handles));
+ if (IS_ERR(bo_handles_write)) {
+ r = PTR_ERR(bo_handles_write);
+ goto free_bo_handles_read;
+ }
+
+ num_syncobj = wait_info->num_syncobj_handles;
+ syncobj_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_handles),
+ size_mul(sizeof(u32), num_syncobj));
+ if (IS_ERR(syncobj_handles)) {
+ r = PTR_ERR(syncobj_handles);
+ goto free_bo_handles_write;
+ }
+
+ num_points = wait_info->num_syncobj_timeline_handles;
+ timeline_handles = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_handles),
+ sizeof(u32) * num_points);
+ if (IS_ERR(timeline_handles)) {
+ r = PTR_ERR(timeline_handles);
+ goto free_syncobj_handles;
+ }
+
+ timeline_points = memdup_user(u64_to_user_ptr(wait_info->syncobj_timeline_points),
+ sizeof(u32) * num_points);
+ if (IS_ERR(timeline_points)) {
+ r = PTR_ERR(timeline_points);
+ goto free_timeline_handles;
+ }
+
+ gobj_read = kmalloc_array(num_read_bo_handles, sizeof(*gobj_read), GFP_KERNEL);
+ if (!gobj_read) {
+ r = -ENOMEM;
+ goto free_timeline_points;
+ }
+
+ for (rentry = 0; rentry < num_read_bo_handles; rentry++) {
+ gobj_read[rentry] = drm_gem_object_lookup(filp, bo_handles_read[rentry]);
+ if (!gobj_read[rentry]) {
+ r = -ENOENT;
+ goto put_gobj_read;
+ }
+ }
+
+ gobj_write = kmalloc_array(num_write_bo_handles, sizeof(*gobj_write), GFP_KERNEL);
+ if (!gobj_write) {
+ r = -ENOMEM;
+ goto put_gobj_read;
+ }
+
+ for (wentry = 0; wentry < num_write_bo_handles; wentry++) {
+ gobj_write[wentry] = drm_gem_object_lookup(filp, bo_handles_write[wentry]);
+ if (!gobj_write[wentry]) {
+ r = -ENOENT;
+ goto put_gobj_write;
+ }
+ }
+
+ drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT,
+ (num_read_bo_handles + num_write_bo_handles));
+
+ /* Lock all BOs with retry handling */
+ drm_exec_until_all_locked(&exec) {
+ r = drm_exec_prepare_array(&exec, gobj_read, num_read_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ drm_exec_fini(&exec);
+ goto put_gobj_write;
+ }
+
+ r = drm_exec_prepare_array(&exec, gobj_write, num_write_bo_handles, 1);
+ drm_exec_retry_on_contention(&exec);
+ if (r) {
+ drm_exec_fini(&exec);
+ goto put_gobj_write;
+ }
+ }
+
+ if (!wait_info->num_fences) {
+ if (num_points) {
+ struct dma_fence_unwrap iter;
+ struct dma_fence *fence;
+ struct dma_fence *f;
+
+ for (i = 0; i < num_points; i++) {
+ r = drm_syncobj_find_fence(filp, timeline_handles[i],
+ timeline_points[i],
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto exec_fini;
+
+ dma_fence_unwrap_for_each(f, &iter, fence)
+ num_fences++;
+
+ dma_fence_put(fence);
+ }
+ }
+
+ /* Count syncobj's fence */
+ for (i = 0; i < num_syncobj; i++) {
+ struct dma_fence *fence;
+
+ r = drm_syncobj_find_fence(filp, syncobj_handles[i],
+ 0,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto exec_fini;
+
+ num_fences++;
+ dma_fence_put(fence);
+ }
+
+ /* Count GEM objects fence */
+ for (i = 0; i < num_read_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence)
+ num_fences++;
+ }
+
+ for (i = 0; i < num_write_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence)
+ num_fences++;
+ }
+
+ /*
+ * Passing num_fences = 0 means that userspace doesn't want to
+ * retrieve userq_fence_info. If num_fences = 0 we skip filling
+ * userq_fence_info and return the actual number of fences in
+ * wait_info->num_fences.
+ */
+ wait_info->num_fences = num_fences;
+ } else {
+ /* Array of fence info */
+ fence_info = kmalloc_array(wait_info->num_fences, sizeof(*fence_info), GFP_KERNEL);
+ if (!fence_info) {
+ r = -ENOMEM;
+ goto exec_fini;
+ }
+
+ /* Array of fences */
+ fences = kmalloc_array(wait_info->num_fences, sizeof(*fences), GFP_KERNEL);
+ if (!fences) {
+ r = -ENOMEM;
+ goto free_fence_info;
+ }
+
+ /* Retrieve GEM read objects fence */
+ for (i = 0; i < num_read_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_read[i]->resv,
+ DMA_RESV_USAGE_READ, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ dma_fence_get(fence);
+ }
+ }
+
+ /* Retrieve GEM write objects fence */
+ for (i = 0; i < num_write_bo_handles; i++) {
+ struct dma_resv_iter resv_cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&resv_cursor, gobj_write[i]->resv,
+ DMA_RESV_USAGE_WRITE, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ dma_fence_get(fence);
+ }
+ }
+
+ if (num_points) {
+ struct dma_fence_unwrap iter;
+ struct dma_fence *fence;
+ struct dma_fence *f;
+
+ for (i = 0; i < num_points; i++) {
+ r = drm_syncobj_find_fence(filp, timeline_handles[i],
+ timeline_points[i],
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto free_fences;
+
+ dma_fence_unwrap_for_each(f, &iter, fence) {
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ dma_fence_get(f);
+ fences[num_fences++] = f;
+ }
+
+ dma_fence_put(fence);
+ }
+ }
+
+ /* Retrieve syncobj's fence */
+ for (i = 0; i < num_syncobj; i++) {
+ struct dma_fence *fence;
+
+ r = drm_syncobj_find_fence(filp, syncobj_handles[i],
+ 0,
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
+ &fence);
+ if (r)
+ goto free_fences;
+
+ if (WARN_ON_ONCE(num_fences >= wait_info->num_fences)) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ fences[num_fences++] = fence;
+ }
+
+ /*
+ * Keep only the latest fences to reduce the number of values
+ * given back to userspace.
+ */
+ num_fences = dma_fence_dedup_array(fences, num_fences);
+
+ waitq = idr_find(&userq_mgr->userq_idr, wait_info->waitq_id);
+ if (!waitq) {
+ r = -EINVAL;
+ goto free_fences;
+ }
+
+ for (i = 0, cnt = 0; i < num_fences; i++) {
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct amdgpu_userq_fence *userq_fence;
+ u32 index;
+
+ userq_fence = to_amdgpu_userq_fence(fences[i]);
+ if (!userq_fence) {
+ /*
+ * Just waiting on other driver fences should
+ * be good for now
+ */
+ r = dma_fence_wait(fences[i], true);
+ if (r) {
+ dma_fence_put(fences[i]);
+ goto free_fences;
+ }
+
+ dma_fence_put(fences[i]);
+ continue;
+ }
+
+ fence_drv = userq_fence->fence_drv;
+ /*
+ * We need to make sure the user queue releases its references
+ * to the fence drivers at some point before queue destruction.
+ * Otherwise, we would accumulate those references until we run
+ * out of space and crash.
+ */
+ r = xa_alloc(&waitq->fence_drv_xa, &index, fence_drv,
+ xa_limit_32b, GFP_KERNEL);
+ if (r)
+ goto free_fences;
+
+ amdgpu_userq_fence_driver_get(fence_drv);
+
+ /* Store drm syncobj's gpu va address and value */
+ fence_info[cnt].va = fence_drv->va;
+ fence_info[cnt].value = fences[i]->seqno;
+
+ dma_fence_put(fences[i]);
+ /* Increment the actual userq fence count */
+ cnt++;
+ }
+
+ wait_info->num_fences = cnt;
+ /* Copy userq fence info to user space */
+ if (copy_to_user(u64_to_user_ptr(wait_info->out_fences),
+ fence_info, wait_info->num_fences * sizeof(*fence_info))) {
+ r = -EFAULT;
+ goto free_fences;
+ }
+
+ kfree(fences);
+ kfree(fence_info);
+ }
+
+ drm_exec_fini(&exec);
+ for (i = 0; i < num_read_bo_handles; i++)
+ drm_gem_object_put(gobj_read[i]);
+ kfree(gobj_read);
+
+ for (i = 0; i < num_write_bo_handles; i++)
+ drm_gem_object_put(gobj_write[i]);
+ kfree(gobj_write);
+
+ kfree(timeline_points);
+ kfree(timeline_handles);
+ kfree(syncobj_handles);
+ kfree(bo_handles_write);
+ kfree(bo_handles_read);
+
+ return 0;
+
+free_fences:
+ while (num_fences-- > 0)
+ dma_fence_put(fences[num_fences]);
+ kfree(fences);
+free_fence_info:
+ kfree(fence_info);
+exec_fini:
+ drm_exec_fini(&exec);
+put_gobj_write:
+ while (wentry-- > 0)
+ drm_gem_object_put(gobj_write[wentry]);
+ kfree(gobj_write);
+put_gobj_read:
+ while (rentry-- > 0)
+ drm_gem_object_put(gobj_read[rentry]);
+ kfree(gobj_read);
+free_timeline_points:
+ kfree(timeline_points);
+free_timeline_handles:
+ kfree(timeline_handles);
+free_syncobj_handles:
+ kfree(syncobj_handles);
+free_bo_handles_write:
+ kfree(bo_handles_write);
+free_bo_handles_read:
+ kfree(bo_handles_read);
+
+ return r;
+}
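
For orientation, the fence model implemented above reduces to a single comparison: each user-queue fence captures the queue's write pointer as its seqno, the GPU advances a 64-bit value in the shared seq64 slot as work completes, and amdgpu_userq_fence_signaled() treats the fence as signaled once that read-back value has caught up (rptr >= wptr). A minimal standalone sketch of that check, with purely illustrative names that are not part of the driver:

#include <stdbool.h>
#include <stdint.h>

struct demo_userq_fence {
	uint64_t seqno;	/* wptr captured when the fence was created */
};

/* rptr: last value the GPU wrote back through the shared seq64 slot */
static bool demo_userq_fence_signaled(const struct demo_userq_fence *f,
				      uint64_t rptr)
{
	return rptr >= f->seqno;
}
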
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
new file mode 100644
index 000000000000..d76add2afc77
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_USERQ_FENCE_H__
+#define __AMDGPU_USERQ_FENCE_H__
+
+#include <linux/types.h>
+
+#include "amdgpu_userq.h"
+
+struct amdgpu_userq_fence {
+ struct dma_fence base;
+ /*
+ * This lock is necessary to synchronize the
+ * userqueue dma fence operations.
+ */
+ spinlock_t lock;
+ struct list_head link;
+ unsigned long fence_drv_array_count;
+ struct amdgpu_userq_fence_driver *fence_drv;
+ struct amdgpu_userq_fence_driver **fence_drv_array;
+};
+
+struct amdgpu_userq_fence_driver {
+ struct kref refcount;
+ u64 va;
+ u64 gpu_addr;
+ u64 *cpu_addr;
+ u64 context;
+ /*
+ * This lock is necessary to synchronize the access
+ * to the fences list by the fence driver.
+ */
+ spinlock_t fence_list_lock;
+ struct list_head fences;
+ struct amdgpu_device *adev;
+ char timeline_name[TASK_COMM_LEN];
+};
+
+int amdgpu_userq_fence_slab_init(void);
+void amdgpu_userq_fence_slab_fini(void);
+
+void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv);
+int amdgpu_userq_fence_driver_alloc(struct amdgpu_device *adev,
+ struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_free(struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_process(struct amdgpu_userq_fence_driver *fence_drv);
+void amdgpu_userq_fence_driver_force_completion(struct amdgpu_usermode_queue *userq);
+void amdgpu_userq_fence_driver_destroy(struct kref *ref);
+int amdgpu_userq_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+int amdgpu_userq_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
+
+#endif
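
The header declares the get/put pair, but their bodies live earlier in amdgpu_userq_fence.c and are not part of this excerpt. Given the kref embedded in struct amdgpu_userq_fence_driver and the kref-style signature of amdgpu_userq_fence_driver_destroy(), they presumably follow the usual kref pattern; a minimal sketch under that assumption:

#include <linux/kref.h>

/* Assumed wrappers; the real definitions are in amdgpu_userq_fence.c. */
void amdgpu_userq_fence_driver_get(struct amdgpu_userq_fence_driver *fence_drv)
{
	kref_get(&fence_drv->refcount);
}

void amdgpu_userq_fence_driver_put(struct amdgpu_userq_fence_driver *fence_drv)
{
	/* amdgpu_userq_fence_driver_destroy() frees the driver on the last put */
	kref_put(&fence_drv->refcount, amdgpu_userq_fence_driver_destroy);
}
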
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h
new file mode 100644
index 000000000000..1e40ca3b1584
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2025 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_UTILS_H_
+#define AMDGPU_UTILS_H_
+
+/* ---------- Generic 2-bit capability attribute encoding ----------
+ * 00 INVALID, 01 RO, 10 WO, 11 RW
+ */
+enum amdgpu_cap_attr {
+ AMDGPU_CAP_ATTR_INVALID = 0,
+ AMDGPU_CAP_ATTR_RO = 1 << 0,
+ AMDGPU_CAP_ATTR_WO = 1 << 1,
+ AMDGPU_CAP_ATTR_RW = (AMDGPU_CAP_ATTR_RO | AMDGPU_CAP_ATTR_WO),
+};
+
+#define AMDGPU_CAP_ATTR_BITS 2
+#define AMDGPU_CAP_ATTR_MAX ((1U << AMDGPU_CAP_ATTR_BITS) - 1)
+
+/* Internal helper to build helpers for a given enum NAME */
+#define DECLARE_ATTR_CAP_CLASS_HELPERS(NAME) \
+enum { NAME##_BITMAP_BITS = NAME##_COUNT * AMDGPU_CAP_ATTR_BITS }; \
+struct NAME##_caps { \
+ DECLARE_BITMAP(bmap, NAME##_BITMAP_BITS); \
+}; \
+static inline unsigned int NAME##_ATTR_START(enum NAME##_cap_id cap) \
+{ return (unsigned int)cap * AMDGPU_CAP_ATTR_BITS; } \
+static inline void NAME##_attr_init(struct NAME##_caps *c) \
+{ if (c) bitmap_zero(c->bmap, NAME##_BITMAP_BITS); } \
+static inline int NAME##_attr_set(struct NAME##_caps *c, \
+ enum NAME##_cap_id cap, enum amdgpu_cap_attr attr) \
+{ \
+ if (!c) \
+ return -EINVAL; \
+ if (cap >= NAME##_COUNT) \
+ return -EINVAL; \
+ if ((unsigned int)attr > AMDGPU_CAP_ATTR_MAX) \
+ return -EINVAL; \
+ bitmap_write(c->bmap, (unsigned long)attr, \
+ NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
+ return 0; \
+} \
+static inline int NAME##_attr_get(const struct NAME##_caps *c, \
+ enum NAME##_cap_id cap, enum amdgpu_cap_attr *out) \
+{ \
+ unsigned long v; \
+ if (!c || !out) \
+ return -EINVAL; \
+ if (cap >= NAME##_COUNT) \
+ return -EINVAL; \
+ v = bitmap_read(c->bmap, NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \
+ *out = (enum amdgpu_cap_attr)v; \
+ return 0; \
+} \
+static inline bool NAME##_cap_is_ro(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RO; } \
+static inline bool NAME##_cap_is_wo(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_WO; } \
+static inline bool NAME##_cap_is_rw(const struct NAME##_caps *c, enum NAME##_cap_id id) \
+{ enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RW; }
+
+/* Element expander for enum creation */
+#define _CAP_ENUM_ELEM(x) x,
+
+/* Public macro: declare enum + helpers from an X-macro list */
+#define DECLARE_ATTR_CAP_CLASS(NAME, LIST_MACRO) \
+ enum NAME##_cap_id { LIST_MACRO(_CAP_ENUM_ELEM) NAME##_COUNT }; \
+ DECLARE_ATTR_CAP_CLASS_HELPERS(NAME)
+
+#endif /* AMDGPU_UTILS_H_ */
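
The X-macro interface above is easiest to see with a concrete caller. In the hypothetical sketch below (DEMO_CAP_LIST, demo, and the DEMO_CAP_* names are invented for illustration), DECLARE_ATTR_CAP_CLASS() generates enum demo_cap_id, struct demo_caps and the demo_attr_init/set/get helpers, with each capability occupying AMDGPU_CAP_ATTR_BITS (two) bits of the backing bitmap:

#include "amdgpu_utils.h"

#define DEMO_CAP_LIST(X)	\
	X(DEMO_CAP_FOO)		\
	X(DEMO_CAP_BAR)

DECLARE_ATTR_CAP_CLASS(demo, DEMO_CAP_LIST)

static void demo_caps_example(void)
{
	struct demo_caps caps;
	enum amdgpu_cap_attr attr;

	demo_attr_init(&caps);
	demo_attr_set(&caps, DEMO_CAP_FOO, AMDGPU_CAP_ATTR_RW);
	demo_attr_set(&caps, DEMO_CAP_BAR, AMDGPU_CAP_ATTR_RO);

	/* demo_cap_is_rw(&caps, DEMO_CAP_FOO) is now true ... */
	if (demo_attr_get(&caps, DEMO_CAP_BAR, &attr) == 0)
		(void)attr;	/* ... and attr comes back as AMDGPU_CAP_ATTR_RO */
}
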
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2ca09f111f08..5c38f0d30c87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -30,15 +30,19 @@
#include <linux/firmware.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+
#include <drm/drm.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_uvd.h"
+#include "amdgpu_cs.h"
#include "cikd.h"
#include "uvd/uvd_4_2_d.h"
+#include "amdgpu_ras.h"
+
/* 1 second timeout */
#define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -52,12 +56,18 @@
#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
/* Firmware Names */
+#ifdef CONFIG_DRM_AMDGPU_SI
+#define FIRMWARE_TAHITI "amdgpu/tahiti_uvd.bin"
+#define FIRMWARE_VERDE "amdgpu/verde_uvd.bin"
+#define FIRMWARE_PITCAIRN "amdgpu/pitcairn_uvd.bin"
+#define FIRMWARE_OLAND "amdgpu/oland_uvd.bin"
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
-#define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin"
-#define FIRMWARE_KABINI "radeon/kabini_uvd.bin"
-#define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin"
-#define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin"
-#define FIRMWARE_MULLINS "radeon/mullins_uvd.bin"
+#define FIRMWARE_BONAIRE "amdgpu/bonaire_uvd.bin"
+#define FIRMWARE_KABINI "amdgpu/kabini_uvd.bin"
+#define FIRMWARE_KAVERI "amdgpu/kaveri_uvd.bin"
+#define FIRMWARE_HAWAII "amdgpu/hawaii_uvd.bin"
+#define FIRMWARE_MULLINS "amdgpu/mullins_uvd.bin"
#endif
#define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
#define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
@@ -66,34 +76,44 @@
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin"
#define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin"
+#define FIRMWARE_VEGAM "amdgpu/vegam_uvd.bin"
#define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin"
+#define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin"
+#define FIRMWARE_VEGA20 "amdgpu/vega20_uvd.bin"
-#define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00)
-#define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00)
-#define mmUVD_GPCOM_VCPU_CMD_VEGA10 (0x03c3 + 0x7e00)
-#define mmUVD_NO_OP_VEGA10 (0x03ff + 0x7e00)
-#define mmUVD_ENGINE_CNTL_VEGA10 (0x03c6 + 0x7e00)
+/* These are common relative offsets for all asics, from uvd_7_0_offset.h, */
+#define UVD_GPCOM_VCPU_CMD 0x03c3
+#define UVD_GPCOM_VCPU_DATA0 0x03c4
+#define UVD_GPCOM_VCPU_DATA1 0x03c5
+#define UVD_NO_OP 0x03ff
+#define UVD_BASE_SI 0x3800
-/**
+/*
* amdgpu_uvd_cs_ctx - Command submission parser context
*
* Used for emulating virtual memory support on UVD 4.2.
*/
struct amdgpu_uvd_cs_ctx {
struct amdgpu_cs_parser *parser;
- unsigned reg, count;
- unsigned data0, data1;
- unsigned idx;
- unsigned ib_idx;
+ unsigned int reg, count;
+ unsigned int data0, data1;
+ unsigned int idx;
+ struct amdgpu_ib *ib;
/* does the IB has a msg command */
bool has_msg_cmd;
/* minimum buffer sizes */
- unsigned *buf_sizes;
+ unsigned int *buf_sizes;
};
+#ifdef CONFIG_DRM_AMDGPU_SI
+MODULE_FIRMWARE(FIRMWARE_TAHITI);
+MODULE_FIRMWARE(FIRMWARE_VERDE);
+MODULE_FIRMWARE(FIRMWARE_PITCAIRN);
+MODULE_FIRMWARE(FIRMWARE_OLAND);
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
@@ -108,24 +128,84 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
+MODULE_FIRMWARE(FIRMWARE_VEGAM);
MODULE_FIRMWARE(FIRMWARE_VEGA10);
+MODULE_FIRMWARE(FIRMWARE_VEGA12);
+MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
+static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo);
+
+static int amdgpu_uvd_create_msg_bo_helper(struct amdgpu_device *adev,
+ uint32_t size,
+ struct amdgpu_bo **bo_ptr)
+{
+ struct ttm_operation_ctx ctx = { true, false };
+ struct amdgpu_bo *bo = NULL;
+ void *addr;
+ int r;
+
+ r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &bo, NULL, &addr);
+ if (r)
+ return r;
+
+ if (adev->uvd.address_64_bit)
+ goto succ;
+
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unpin(bo);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ amdgpu_uvd_force_into_uvd_segment(bo);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ goto err;
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto err_pin;
+ r = amdgpu_bo_kmap(bo, &addr);
+ if (r)
+ goto err_kmap;
+succ:
+ amdgpu_bo_unreserve(bo);
+ *bo_ptr = bo;
+ return 0;
+err_kmap:
+ amdgpu_bo_unpin(bo);
+err_pin:
+err:
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+ return r;
+}
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
- unsigned version_major, version_minor, family_id;
- int i, r;
+ unsigned int family_id;
+ int i, j, r;
INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ fw_name = FIRMWARE_TAHITI;
+ break;
+ case CHIP_VERDE:
+ fw_name = FIRMWARE_VERDE;
+ break;
+ case CHIP_PITCAIRN:
+ fw_name = FIRMWARE_PITCAIRN;
+ break;
+ case CHIP_OLAND:
+ fw_name = FIRMWARE_OLAND;
+ break;
+#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
fw_name = FIRMWARE_BONAIRE;
@@ -161,29 +241,30 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
+ case CHIP_POLARIS12:
+ fw_name = FIRMWARE_POLARIS12;
+ break;
case CHIP_VEGA10:
fw_name = FIRMWARE_VEGA10;
break;
- case CHIP_POLARIS12:
- fw_name = FIRMWARE_POLARIS12;
+ case CHIP_VEGA12:
+ fw_name = FIRMWARE_VEGA12;
+ break;
+ case CHIP_VEGAM:
+ fw_name = FIRMWARE_VEGAM;
+ break;
+ case CHIP_VEGA20:
+ fw_name = FIRMWARE_VEGA20;
break;
default:
return -EINVAL;
}
- r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
- if (r) {
- dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
- fw_name);
- return r;
- }
-
- r = amdgpu_ucode_validate(adev->uvd.fw);
+ r = amdgpu_ucode_request(adev, &adev->uvd.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
fw_name);
- release_firmware(adev->uvd.fw);
- adev->uvd.fw = NULL;
+ amdgpu_ucode_release(&adev->uvd.fw);
return r;
}
@@ -192,50 +273,65 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
- version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
- version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
- version_major, version_minor, family_id);
-
- /*
- * Limit the number of UVD handles depending on microcode major
- * and minor versions. The firmware version which has 40 UVD
- * instances support is 1.80. So all subsequent versions should
- * also have the same support.
- */
- if ((version_major > 0x01) ||
- ((version_major == 0x01) && (version_minor >= 0x50)))
- adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
- adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
- (family_id << 8));
+ if (adev->asic_type < CHIP_VEGA20) {
+ unsigned int version_major, version_minor;
+
+ version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+ version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+ DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
+ version_major, version_minor, family_id);
+
+ /*
+ * Limit the number of UVD handles depending on microcode major
+ * and minor versions. The firmware version which has 40 UVD
+ * instances support is 1.80. So all subsequent versions should
+ * also have the same support.
+ */
+ if ((version_major > 0x01) ||
+ ((version_major == 0x01) && (version_minor >= 0x50)))
+ adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
+
+ adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
+ (family_id << 8));
+
+ if ((adev->asic_type == CHIP_POLARIS10 ||
+ adev->asic_type == CHIP_POLARIS11) &&
+ (adev->uvd.fw_version < FW_1_66_16))
+ DRM_ERROR("POLARIS10/11 UVD firmware version %u.%u is too old.\n",
+ version_major, version_minor);
+ } else {
+ unsigned int enc_major, enc_minor, dec_minor;
+
+ dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+ enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
+ enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
+ DRM_INFO("Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n",
+ enc_major, enc_minor, dec_minor, family_id);
+
+ adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
- if ((adev->asic_type == CHIP_POLARIS10 ||
- adev->asic_type == CHIP_POLARIS11) &&
- (adev->uvd.fw_version < FW_1_66_16))
- DRM_ERROR("POLARIS10/11 UVD firmware version %hu.%hu is too old.\n",
- version_major, version_minor);
+ adev->uvd.fw_version = le32_to_cpu(hdr->ucode_version);
+ }
bo_size = AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE
+ AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
- r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo,
- &adev->uvd.gpu_addr, &adev->uvd.cpu_addr);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
- return r;
- }
-
- ring = &adev->uvd.ring;
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->uvd.entity,
- rq, amdgpu_sched_jobs);
- if (r != 0) {
- DRM_ERROR("Failed setting up UVD run queue.\n");
- return r;
+ for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->uvd.inst[j].vcpu_bo,
+ &adev->uvd.inst[j].gpu_addr,
+ &adev->uvd.inst[j].cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
+ return r;
+ }
}
for (i = 0; i < adev->uvd.max_handles; ++i) {
@@ -244,9 +340,13 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
}
/* from uvd v5.0 HW addressing capacity increased to 64 bits */
- if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
+ if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
adev->uvd.address_64_bit = true;
+ r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo);
+ if (r)
+ return r;
+
switch (adev->asic_type) {
case CHIP_TONGA:
adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
@@ -269,98 +369,169 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
- kfree(adev->uvd.saved_bo);
+ void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo);
+ int i, j;
+
+ drm_sched_entity_destroy(&adev->uvd.entity);
- amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
+ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
+ kvfree(adev->uvd.inst[j].saved_bo);
- amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo,
- &adev->uvd.gpu_addr,
- (void **)&adev->uvd.cpu_addr);
+ amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
+ &adev->uvd.inst[j].gpu_addr,
+ (void **)&adev->uvd.inst[j].cpu_addr);
- amdgpu_ring_fini(&adev->uvd.ring);
+ amdgpu_ring_fini(&adev->uvd.inst[j].ring);
- release_firmware(adev->uvd.fw);
+ for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
+ amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
+ }
+ amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr);
+ amdgpu_ucode_release(&adev->uvd.fw);
return 0;
}
-int amdgpu_uvd_suspend(struct amdgpu_device *adev)
+/**
+ * amdgpu_uvd_entity_init - init entity
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring pointer to check
+ *
+ * Initialize the entity used for handle management in the kernel driver.
+ */
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
- unsigned size;
+ if (ring == &adev->uvd.inst[0].ring) {
+ struct drm_gpu_scheduler *sched = &ring->sched;
+ int r;
+
+ r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up UVD kernel entity.\n");
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+int amdgpu_uvd_prepare_suspend(struct amdgpu_device *adev)
+{
+ unsigned int size;
void *ptr;
- int i;
+ int i, j, idx;
- if (adev->uvd.vcpu_bo == NULL)
- return 0;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
- for (i = 0; i < adev->uvd.max_handles; ++i)
- if (atomic_read(&adev->uvd.handles[i]))
- break;
+ /* only valid for physical mode */
+ if (adev->asic_type < CHIP_POLARIS10) {
+ for (i = 0; i < adev->uvd.max_handles; ++i)
+ if (atomic_read(&adev->uvd.handles[i]))
+ break;
- if (i == AMDGPU_MAX_UVD_HANDLES)
- return 0;
+ if (i == adev->uvd.max_handles)
+ return 0;
+ }
- cancel_delayed_work_sync(&adev->uvd.idle_work);
+ for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
+ if (adev->uvd.inst[j].vcpu_bo == NULL)
+ continue;
- size = amdgpu_bo_size(adev->uvd.vcpu_bo);
- ptr = adev->uvd.cpu_addr;
+ size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
+ ptr = adev->uvd.inst[j].cpu_addr;
- adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
- if (!adev->uvd.saved_bo)
- return -ENOMEM;
+ adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
+ if (!adev->uvd.inst[j].saved_bo)
+ return -ENOMEM;
- memcpy_fromio(adev->uvd.saved_bo, ptr, size);
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ /* re-write 0 since err_event_athub will corrupt VCPU buffer */
+ if (amdgpu_ras_intr_triggered())
+ memset(adev->uvd.inst[j].saved_bo, 0, size);
+ else
+ memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size);
+
+ drm_dev_exit(idx);
+ }
+ }
+
+ return 0;
+}
+
+int amdgpu_uvd_suspend(struct amdgpu_device *adev)
+{
+ if (amdgpu_ras_intr_triggered())
+ DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n");
return 0;
}
int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
- unsigned size;
+ unsigned int size;
void *ptr;
+ int i, idx;
- if (adev->uvd.vcpu_bo == NULL)
- return -EINVAL;
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
+ if (adev->uvd.inst[i].vcpu_bo == NULL)
+ return -EINVAL;
- size = amdgpu_bo_size(adev->uvd.vcpu_bo);
- ptr = adev->uvd.cpu_addr;
+ size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo);
+ ptr = adev->uvd.inst[i].cpu_addr;
- if (adev->uvd.saved_bo != NULL) {
- memcpy_toio(ptr, adev->uvd.saved_bo, size);
- kfree(adev->uvd.saved_bo);
- adev->uvd.saved_bo = NULL;
- } else {
- const struct common_firmware_header *hdr;
- unsigned offset;
-
- hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset,
- le32_to_cpu(hdr->ucode_size_bytes));
- size -= le32_to_cpu(hdr->ucode_size_bytes);
- ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ if (adev->uvd.inst[i].saved_bo != NULL) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
+ drm_dev_exit(idx);
+ }
+ kvfree(adev->uvd.inst[i].saved_bo);
+ adev->uvd.inst[i].saved_bo = NULL;
+ } else {
+ const struct common_firmware_header *hdr;
+ unsigned int offset;
+
+ hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
+ drm_dev_exit(idx);
+ }
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ }
+ memset_io(ptr, 0, size);
+ /* to restore uvd fence seq */
+ amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring);
}
- memset_io(ptr, 0, size);
}
-
return 0;
}
void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
- struct amdgpu_ring *ring = &adev->uvd.ring;
+ struct amdgpu_ring *ring = &adev->uvd.inst[0].ring;
int i, r;
for (i = 0; i < adev->uvd.max_handles; ++i) {
uint32_t handle = atomic_read(&adev->uvd.handles[i]);
+
if (handle != 0 && adev->uvd.filp[i] == filp) {
struct dma_fence *fence;
- r = amdgpu_uvd_get_destroy_msg(ring, handle,
- false, &fence);
+ r = amdgpu_uvd_get_destroy_msg(ring, handle, false,
+ &fence);
if (r) {
- DRM_ERROR("Error destroying UVD (%d)!\n", r);
+ DRM_ERROR("Error destroying UVD %d!\n", r);
continue;
}
@@ -376,9 +547,12 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo)
{
int i;
+
for (i = 0; i < abo->placement.num_placement; ++i) {
abo->placements[i].fpfn = 0 >> PAGE_SHIFT;
abo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
+ if (abo->placements[i].mem_type == TTM_PL_VRAM)
+ abo->placements[i].flags |= TTM_PL_FLAG_CONTIGUOUS;
}
}
@@ -387,8 +561,8 @@ static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
uint32_t lo, hi;
uint64_t addr;
- lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
- hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
+ lo = amdgpu_ib_get_value(ctx->ib, ctx->data0);
+ hi = amdgpu_ib_get_value(ctx->ib, ctx->data1);
addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);
return addr;
@@ -404,29 +578,31 @@ static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
*/
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
+ struct ttm_operation_ctx tctx = { false, false };
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
uint32_t cmd;
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r = 0;
- mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
- if (mapping == NULL) {
- DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
- return -EINVAL;
+ r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+ if (r) {
+ DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
+ return r;
}
if (!ctx->parser->adev->uvd.address_64_bit) {
/* check if it's a message or feedback command */
- cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
+ cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx) >> 1;
if (cmd == 0x0 || cmd == 0x3) {
/* yes, force it into VRAM */
uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
- amdgpu_ttm_placement_from_domain(bo, domain);
+
+ amdgpu_bo_placement_from_domain(bo, domain);
}
amdgpu_uvd_force_into_uvd_segment(bo);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
}
return r;
@@ -435,27 +611,28 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
/**
* amdgpu_uvd_cs_msg_decode - handle UVD decode message
*
+ * @adev: amdgpu_device pointer
* @msg: pointer to message structure
- * @buf_sizes: returned buffer sizes
+ * @buf_sizes: placeholder to put the different buffer lengths
*
* Peek into the decode message and calculate the necessary buffer sizes.
*/
static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
- unsigned buf_sizes[])
+ unsigned int buf_sizes[])
{
- unsigned stream_type = msg[4];
- unsigned width = msg[6];
- unsigned height = msg[7];
- unsigned dpb_size = msg[9];
- unsigned pitch = msg[28];
- unsigned level = msg[57];
+ unsigned int stream_type = msg[4];
+ unsigned int width = msg[6];
+ unsigned int height = msg[7];
+ unsigned int dpb_size = msg[9];
+ unsigned int pitch = msg[28];
+ unsigned int level = msg[57];
- unsigned width_in_mb = width / 16;
- unsigned height_in_mb = ALIGN(height / 16, 2);
- unsigned fs_in_mb = width_in_mb * height_in_mb;
+ unsigned int width_in_mb = width / 16;
+ unsigned int height_in_mb = ALIGN(height / 16, 2);
+ unsigned int fs_in_mb = width_in_mb * height_in_mb;
- unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
- unsigned min_ctx_size = ~0;
+ unsigned int image_size, tmp, min_dpb_size, num_dpb_buffer;
+ unsigned int min_ctx_size = ~0;
image_size = width * height;
image_size += image_size / 2;
@@ -463,7 +640,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
switch (stream_type) {
case 0: /* H264 */
- switch(level) {
+ switch (level) {
case 30:
num_dpb_buffer = 8100 / fs_in_mb;
break;
@@ -541,7 +718,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
break;
case 7: /* H264 Perf */
- switch(level) {
+ switch (level) {
case 30:
num_dpb_buffer = 8100 / fs_in_mb;
break;
@@ -574,7 +751,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
/* reference picture buffer */
min_dpb_size = image_size * num_dpb_buffer;
- if (!adev->uvd.use_ctx_buf){
+ if (!adev->uvd.use_ctx_buf) {
/* macroblock context buffer */
min_dpb_size +=
width_in_mb * height_in_mb * num_dpb_buffer * 192;
@@ -588,6 +765,10 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
}
break;
+ case 8: /* MJPEG */
+ min_dpb_size = 0;
+ break;
+
case 16: /* H265 */
image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
image_size = ALIGN(image_size, 256);
@@ -617,6 +798,8 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
buf_sizes[0x1] = dpb_size;
buf_sizes[0x2] = image_size;
buf_sizes[0x4] = min_ctx_size;
+ /* store image width to adjust nb memory pstate */
+ adev->uvd.decode_image_width = width;
return 0;
}
@@ -631,7 +814,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
* Make sure that we don't open up to many sessions.
*/
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
- struct amdgpu_bo *bo, unsigned offset)
+ struct amdgpu_bo *bo, unsigned int offset)
{
struct amdgpu_device *adev = ctx->parser->adev;
int32_t *msg, msg_type, handle;
@@ -646,7 +829,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
r = amdgpu_bo_kmap(bo, &ptr);
if (r) {
- DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r);
+ DRM_ERROR("Failed mapping the UVD) message (%ld)!\n", r);
return r;
}
@@ -656,6 +839,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
handle = msg[2];
if (handle == 0) {
+ amdgpu_bo_kunmap(bo);
DRM_ERROR("Invalid UVD handle!\n");
return -EINVAL;
}
@@ -668,7 +852,8 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
/* try to alloc a new handle */
for (i = 0; i < adev->uvd.max_handles; ++i) {
if (atomic_read(&adev->uvd.handles[i]) == handle) {
- DRM_ERROR("Handle 0x%x already in use!\n", handle);
+ DRM_ERROR(")Handle 0x%x already in use!\n",
+ handle);
return -EINVAL;
}
@@ -711,9 +896,9 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
default:
DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
- return -EINVAL;
}
- BUG();
+
+ amdgpu_bo_kunmap(bo);
return -EINVAL;
}
@@ -733,10 +918,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r;
- mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
- if (mapping == NULL) {
- DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
- return -EINVAL;
+ r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+ if (r) {
+ DRM_ERROR("Can't find BO for addr 0x%08llx\n", addr);
+ return r;
}
start = amdgpu_bo_gpu_offset(bo);
@@ -747,16 +932,14 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
start += addr;
- amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
- lower_32_bits(start));
- amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data1,
- upper_32_bits(start));
+ amdgpu_ib_set_value(ctx->ib, ctx->data0, lower_32_bits(start));
+ amdgpu_ib_set_value(ctx->ib, ctx->data1, upper_32_bits(start));
- cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
+ cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx) >> 1;
if (cmd < 0x4) {
if ((end - start) < ctx->buf_sizes[cmd]) {
DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
- (unsigned)(end - start),
+ (unsigned int)(end - start),
ctx->buf_sizes[cmd]);
return -EINVAL;
}
@@ -764,7 +947,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
} else if (cmd == 0x206) {
if ((end - start) < ctx->buf_sizes[4]) {
DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
- (unsigned)(end - start),
+ (unsigned int)(end - start),
ctx->buf_sizes[4]);
return -EINVAL;
}
@@ -775,14 +958,14 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
if (!ctx->parser->adev->uvd.address_64_bit) {
if ((start >> 28) != ((end - 1) >> 28)) {
- DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
+ DRM_ERROR("reloc %llx-%llx crossing 256MB boundary!\n",
start, end);
return -EINVAL;
}
if ((cmd == 0 || cmd == 0x3) &&
- (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
- DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
+ (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) {
+ DRM_ERROR("msg/fb buffer %llx-%llx out of 256MB segment!\n",
start, end);
return -EINVAL;
}
@@ -812,14 +995,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
- struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
int i, r;
ctx->idx++;
for (i = 0; i <= ctx->count; ++i) {
- unsigned reg = ctx->reg + i;
+ unsigned int reg = ctx->reg + i;
- if (ctx->idx >= ib->length_dw) {
+ if (ctx->idx >= ctx->ib->length_dw) {
DRM_ERROR("Register command after end of CS!\n");
return -EINVAL;
}
@@ -859,12 +1041,12 @@ static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
- struct amdgpu_ib *ib = &ctx->parser->job->ibs[ctx->ib_idx];
int r;
- for (ctx->idx = 0 ; ctx->idx < ib->length_dw; ) {
- uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
- unsigned type = CP_PACKET_GET_TYPE(cmd);
+ for (ctx->idx = 0 ; ctx->idx < ctx->ib->length_dw; ) {
+ uint32_t cmd = amdgpu_ib_get_value(ctx->ib, ctx->idx);
+ unsigned int type = CP_PACKET_GET_TYPE(cmd);
+
switch (type) {
case PACKET_TYPE0:
ctx->reg = CP_PACKET0_GET_REG(cmd);
@@ -888,24 +1070,26 @@ static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
* amdgpu_uvd_ring_parse_cs - UVD command submission parser
*
* @parser: Command submission parser context
+ * @job: the job to parse
+ * @ib: the IB to patch
*
* Parse the command stream, patch in addresses as necessary.
*/
-int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
+int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
{
struct amdgpu_uvd_cs_ctx ctx = {};
- unsigned buf_sizes[] = {
+ unsigned int buf_sizes[] = {
[0x00000000] = 2048,
[0x00000001] = 0xFFFFFFFF,
[0x00000002] = 0xFFFFFFFF,
[0x00000003] = 2048,
[0x00000004] = 0xFFFFFFFF,
};
- struct amdgpu_ib *ib = &parser->job->ibs[ib_idx];
int r;
- parser->job->vm = NULL;
- ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+ job->vm = NULL;
if (ib->length_dw % 16) {
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
@@ -913,13 +1097,9 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return -EINVAL;
}
- r = amdgpu_cs_sysvm_access_required(parser);
- if (r)
- return r;
-
ctx.parser = parser;
ctx.buf_sizes = buf_sizes;
- ctx.ib_idx = ib_idx;
+ ctx.ib = ib;
/* first round only required on chips without UVD 64 bit address support */
if (!parser->adev->uvd.address_64_bit) {
@@ -945,51 +1125,31 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
bool direct, struct dma_fence **fence)
{
- struct ttm_validate_buffer tv;
- struct ww_acquire_ctx ticket;
- struct list_head head;
+ struct amdgpu_device *adev = ring->adev;
+ struct dma_fence *f = NULL;
+ uint32_t offset, data[4];
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- struct dma_fence *f = NULL;
- struct amdgpu_device *adev = ring->adev;
uint64_t addr;
- uint32_t data[4];
int i, r;
- memset(&tv, 0, sizeof(tv));
- tv.bo = &bo->tbo;
-
- INIT_LIST_HEAD(&head);
- list_add(&tv.head, &head);
-
- r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
+ r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ 64, direct ? AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
- if (!ring->adev->uvd.address_64_bit) {
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
- amdgpu_uvd_force_into_uvd_segment(bo);
- }
+ if (adev->asic_type >= CHIP_VEGA10)
+ offset = adev->reg_offset[UVD_HWIP][ring->me][1];
+ else
+ offset = UVD_BASE_SI;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
- if (r)
- goto err;
-
- r = amdgpu_job_alloc_with_ib(adev, 64, &job);
- if (r)
- goto err;
-
- if (adev->asic_type >= CHIP_VEGA10) {
- data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0_VEGA10, 0);
- data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1_VEGA10, 0);
- data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD_VEGA10, 0);
- data[3] = PACKET0(mmUVD_NO_OP_VEGA10, 0);
- } else {
- data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
- data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
- data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
- data[3] = PACKET0(mmUVD_NO_OP, 0);
- }
+ data[0] = PACKET0(offset + UVD_GPCOM_VCPU_DATA0, 0);
+ data[1] = PACKET0(offset + UVD_GPCOM_VCPU_DATA1, 0);
+ data[2] = PACKET0(offset + UVD_GPCOM_VCPU_CMD, 0);
+ data[3] = PACKET0(offset + UVD_NO_OP, 0);
ib = &job->ibs[0];
addr = amdgpu_bo_gpu_offset(bo);
@@ -1006,68 +1166,47 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
ib->length_dw = 16;
if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
if (r)
goto err_free;
-
- amdgpu_job_free(job);
} else {
- r = amdgpu_job_submit(job, ring, &adev->uvd.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+ r = drm_sched_job_add_resv_dependencies(&job->base,
+ bo->tbo.base.resv,
+ DMA_RESV_USAGE_KERNEL);
if (r)
goto err_free;
+
+ f = amdgpu_job_submit(job);
}
- ttm_eu_fence_buffer_objects(&ticket, &head, f);
+ amdgpu_bo_reserve(bo, true);
+ amdgpu_bo_fence(bo, f, false);
+ amdgpu_bo_unreserve(bo);
if (fence)
*fence = dma_fence_get(f);
- amdgpu_bo_unref(&bo);
dma_fence_put(f);
return 0;
err_free:
amdgpu_job_free(job);
-
-err:
- ttm_eu_backoff_reservation(&ticket, &head);
return r;
}
/* multiple fence commands without any stream commands in between can
- crash the vcpu so just try to emmit a dummy create/destroy msg to
- avoid this */
+ * crash the vcpu so just try to emit a dummy create/destroy msg to
+ * avoid this
+ */
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_bo *bo;
+ struct amdgpu_bo *bo = adev->uvd.ib_bo;
uint32_t *msg;
- int r, i;
-
- r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &bo);
- if (r)
- return r;
-
- r = amdgpu_bo_reserve(bo, false);
- if (r) {
- amdgpu_bo_unref(&bo);
- return r;
- }
-
- r = amdgpu_bo_kmap(bo, (void **)&msg);
- if (r) {
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
- return r;
- }
+ int i;
+ msg = amdgpu_bo_kptr(bo);
/* stitch together an UVD create msg */
msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000000);
@@ -1083,41 +1222,27 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = 11; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0);
- amdgpu_bo_kunmap(bo);
- amdgpu_bo_unreserve(bo);
-
return amdgpu_uvd_send_msg(ring, bo, true, fence);
+
}
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_bo *bo;
+ struct amdgpu_bo *bo = NULL;
uint32_t *msg;
int r, i;
- r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &bo);
- if (r)
- return r;
-
- r = amdgpu_bo_reserve(bo, false);
- if (r) {
- amdgpu_bo_unref(&bo);
- return r;
- }
-
- r = amdgpu_bo_kmap(bo, (void **)&msg);
- if (r) {
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
- return r;
+ if (direct) {
+ bo = adev->uvd.ib_bo;
+ } else {
+ r = amdgpu_uvd_create_msg_bo_helper(adev, 4096, &bo);
+ if (r)
+ return r;
}
+ msg = amdgpu_bo_kptr(bo);
/* stitch together an UVD destroy msg */
msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000002);
@@ -1126,20 +1251,27 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = 4; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0);
- amdgpu_bo_kunmap(bo);
- amdgpu_bo_unreserve(bo);
+ r = amdgpu_uvd_send_msg(ring, bo, direct, fence);
+
+ if (!direct)
+ amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
- return amdgpu_uvd_send_msg(ring, bo, direct, fence);
+ return r;
}
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, uvd.idle_work.work);
- unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring);
-
- if (amdgpu_sriov_vf(adev))
- return;
+ unsigned int fences = 0, i, j;
+
+ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
+ fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
+ for (j = 0; j < adev->uvd.num_enc_rings; ++j)
+ fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
+ }
if (fences == 0) {
if (adev->pm.dpm_enabled) {
@@ -1147,10 +1279,10 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
} else {
amdgpu_asic_set_uvd_clocks(adev, 0, 0);
/* shutdown the UVD block */
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
@@ -1160,33 +1292,36 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
+ bool set_clocks;
if (amdgpu_sriov_vf(adev))
return;
+ set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
if (set_clocks) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, true);
} else {
amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_CG_STATE_UNGATE);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+ AMD_PG_STATE_UNGATE);
}
}
}
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
- schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
+ if (!amdgpu_sriov_vf(ring->adev))
+ schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}
/**
* amdgpu_uvd_ring_test_ib - test ib execution
*
* @ring: amdgpu_ring pointer
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Test if we can successfully execute an IB
*/
@@ -1195,28 +1330,26 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *fence;
long r;
- r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ r = amdgpu_uvd_get_create_msg(ring, 1, &fence);
+ if (r)
+ goto error;
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ dma_fence_put(fence);
+ if (r == 0)
+ r = -ETIMEDOUT;
+ if (r < 0)
goto error;
- }
r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- } else {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ else if (r > 0)
r = 0;
- }
dma_fence_put(fence);
@@ -1233,7 +1366,7 @@ error:
*/
uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev)
{
- unsigned i;
+ unsigned int i;
uint32_t used_handles = 0;
for (i = 0; i < adev->uvd.max_handles; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 3553b92bf69a..9dfad2f48ef4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -31,30 +31,50 @@
#define AMDGPU_UVD_SESSION_SIZE (50*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET 256
-struct amdgpu_uvd {
+#define AMDGPU_MAX_UVD_INSTANCES 2
+
+#define AMDGPU_UVD_FIRMWARE_SIZE(adev) \
+ (AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \
+ 8) - AMDGPU_UVD_FIRMWARE_OFFSET)
+
+struct amdgpu_uvd_inst {
struct amdgpu_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
- unsigned fw_version;
void *saved_bo;
- unsigned max_handles;
- atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
- struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
- struct delayed_work idle_work;
- const struct firmware *fw; /* UVD firmware */
struct amdgpu_ring ring;
struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
struct amdgpu_irq_src irq;
- bool address_64_bit;
- bool use_ctx_buf;
- struct amd_sched_entity entity;
- struct amd_sched_entity entity_enc;
uint32_t srbm_soft_reset;
+};
+
+#define AMDGPU_UVD_HARVEST_UVD0 (1 << 0)
+#define AMDGPU_UVD_HARVEST_UVD1 (1 << 1)
+
+struct amdgpu_uvd {
+ const struct firmware *fw; /* UVD firmware */
+ unsigned fw_version;
+ unsigned max_handles;
unsigned num_enc_rings;
+ uint8_t num_uvd_inst;
+ bool address_64_bit;
+ bool use_ctx_buf;
+ struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
+ struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
+ atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
+ struct drm_sched_entity entity;
+ struct delayed_work idle_work;
+ unsigned harvest_config;
+ /* store image width to adjust nb memory state */
+ unsigned decode_image_width;
+ uint32_t keyselect;
+ struct amdgpu_bo *ib_bo;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
+int amdgpu_uvd_prepare_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_resume(struct amdgpu_device *adev);
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
@@ -63,7 +83,9 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence);
void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
struct drm_file *filp);
-int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
+int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring);
int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 735c38d7db0d..ce318f5de047 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -27,12 +27,14 @@
#include <linux/firmware.h>
#include <linux/module.h>
-#include <drm/drmP.h>
+
#include <drm/drm.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
+#include "amdgpu_cs.h"
#include "cikd.h"
/* 1 second timeout */
@@ -40,21 +42,24 @@
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
-#define FIRMWARE_BONAIRE "radeon/bonaire_vce.bin"
-#define FIRMWARE_KABINI "radeon/kabini_vce.bin"
-#define FIRMWARE_KAVERI "radeon/kaveri_vce.bin"
-#define FIRMWARE_HAWAII "radeon/hawaii_vce.bin"
-#define FIRMWARE_MULLINS "radeon/mullins_vce.bin"
+#define FIRMWARE_BONAIRE "amdgpu/bonaire_vce.bin"
+#define FIRMWARE_KABINI "amdgpu/kabini_vce.bin"
+#define FIRMWARE_KAVERI "amdgpu/kaveri_vce.bin"
+#define FIRMWARE_HAWAII "amdgpu/hawaii_vce.bin"
+#define FIRMWARE_MULLINS "amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA "amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI "amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY "amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin"
-#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin"
-#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin"
+#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin"
+#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin"
+#define FIRMWARE_VEGAM "amdgpu/vegam_vce.bin"
#define FIRMWARE_VEGA10 "amdgpu/vega10_vce.bin"
+#define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin"
+#define FIRMWARE_VEGA20 "amdgpu/vega20_vce.bin"
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -70,25 +75,31 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
+MODULE_FIRMWARE(FIRMWARE_VEGAM);
MODULE_FIRMWARE(FIRMWARE_VEGA10);
+MODULE_FIRMWARE(FIRMWARE_VEGA12);
+MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
+static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct dma_fence **fence);
+static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ bool direct, struct dma_fence **fence);
/**
- * amdgpu_vce_init - allocate memory, load vce firmware
+ * amdgpu_vce_sw_init - allocate memory, load vce firmware
*
* @adev: amdgpu_device pointer
+ * @size: size for the new BO
*
* First step to get VCE online, allocate memory and load the firmware
*/
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
- struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
const char *fw_name;
const struct common_firmware_header *hdr;
- unsigned ucode_version, version_major, version_minor, binary_id;
+ unsigned int ucode_version, version_major, version_minor, binary_id;
int i, r;
switch (adev->asic_type) {
@@ -127,30 +138,31 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
+ case CHIP_POLARIS12:
+ fw_name = FIRMWARE_POLARIS12;
+ break;
+ case CHIP_VEGAM:
+ fw_name = FIRMWARE_VEGAM;
+ break;
case CHIP_VEGA10:
fw_name = FIRMWARE_VEGA10;
break;
- case CHIP_POLARIS12:
- fw_name = FIRMWARE_POLARIS12;
+ case CHIP_VEGA12:
+ fw_name = FIRMWARE_VEGA12;
+ break;
+ case CHIP_VEGA20:
+ fw_name = FIRMWARE_VEGA20;
break;
default:
return -EINVAL;
}
- r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
- if (r) {
- dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
- fw_name);
- return r;
- }
-
- r = amdgpu_ucode_validate(adev->vce.fw);
+ r = amdgpu_ucode_request(adev, &adev->vce.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
fw_name);
- release_firmware(adev->vce.fw);
- adev->vce.fw = NULL;
+ amdgpu_ucode_release(&adev->vce.fw);
return r;
}
@@ -160,49 +172,21 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
version_major = (ucode_version >> 20) & 0xfff;
version_minor = (ucode_version >> 8) & 0xfff;
binary_id = ucode_version & 0xff;
- DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n",
+ DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
version_major, version_minor, binary_id);
adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
(binary_id << 8));
- /* allocate firmware, stack and heap BO */
-
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, &adev->vce.vcpu_bo);
+ r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vce.vcpu_bo,
+ &adev->vce.gpu_addr, &adev->vce.cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
return r;
}
- r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
- if (r) {
- amdgpu_bo_unref(&adev->vce.vcpu_bo);
- dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
- return r;
- }
-
- r = amdgpu_bo_pin(adev->vce.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->vce.gpu_addr);
- amdgpu_bo_unreserve(adev->vce.vcpu_bo);
- if (r) {
- amdgpu_bo_unref(&adev->vce.vcpu_bo);
- dev_err(adev->dev, "(%d) VCE bo pin failed\n", r);
- return r;
- }
-
-
- ring = &adev->vce.ring[0];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
- r = amd_sched_entity_init(&ring->sched, &adev->vce.entity,
- rq, amdgpu_sched_jobs);
- if (r != 0) {
- DRM_ERROR("Failed setting up VCE run queue.\n");
- return r;
- }
-
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
atomic_set(&adev->vce.handles[i], 0);
adev->vce.filp[i] = NULL;
@@ -215,7 +199,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
}
/**
- * amdgpu_vce_fini - free memory
+ * amdgpu_vce_sw_fini - free memory
*
* @adev: amdgpu_device pointer
*
@@ -223,21 +207,47 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
*/
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
- unsigned i;
+ unsigned int i;
if (adev->vce.vcpu_bo == NULL)
return 0;
- amd_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
-
- amdgpu_bo_unref(&adev->vce.vcpu_bo);
+ drm_sched_entity_destroy(&adev->vce.entity);
for (i = 0; i < adev->vce.num_rings; i++)
amdgpu_ring_fini(&adev->vce.ring[i]);
- release_firmware(adev->vce.fw);
+ amdgpu_ucode_release(&adev->vce.fw);
mutex_destroy(&adev->vce.idle_mutex);
+ amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
+ (void **)&adev->vce.cpu_addr);
+
+ return 0;
+}
+
+/**
+ * amdgpu_vce_entity_init - init entity
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: amdgpu_ring pointer to check
+ *
+ * Initialize the entity used for handle management in the kernel driver.
+ */
+int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (ring == &adev->vce.ring[0]) {
+ struct drm_gpu_scheduler *sched = &ring->sched;
+ int r;
+
+ r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
+ &sched, 1, NULL);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up VCE run queue.\n");
+ return r;
+ }
+ }
+
return 0;
}
@@ -251,6 +261,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
int i;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
if (adev->vce.vcpu_bo == NULL)
return 0;
@@ -261,7 +273,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
if (i == AMDGPU_MAX_VCE_HANDLES)
return 0;
- cancel_delayed_work_sync(&adev->vce.idle_work);
/* TODO: suspending running encoding sessions isn't supported */
return -EINVAL;
}
@@ -276,8 +287,8 @@ int amdgpu_vce_resume(struct amdgpu_device *adev)
{
void *cpu_addr;
const struct common_firmware_header *hdr;
- unsigned offset;
- int r;
+ unsigned int offset;
+ int r, idx;
if (adev->vce.vcpu_bo == NULL)
return -EINVAL;
@@ -297,8 +308,12 @@ int amdgpu_vce_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
- adev->vce.fw->size - offset);
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
+ adev->vce.fw->size - offset);
+ drm_dev_exit(idx);
+ }
amdgpu_bo_kunmap(adev->vce.vcpu_bo);
@@ -318,10 +333,7 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
container_of(work, struct amdgpu_device, vce.idle_work.work);
- unsigned i, count = 0;
-
- if (amdgpu_sriov_vf(adev))
- return;
+ unsigned int i, count = 0;
for (i = 0; i < adev->vce.num_rings; i++)
count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);
@@ -331,10 +343,10 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
amdgpu_dpm_enable_vce(adev, false);
} else {
amdgpu_asic_set_vce_clocks(adev, 0, 0);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
}
} else {
schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
@@ -363,10 +375,10 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
amdgpu_dpm_enable_vce(adev, true);
} else {
amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
- amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
}
}
@@ -382,7 +394,8 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
*/
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
- schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
+ if (!amdgpu_sriov_vf(ring->adev))
+ schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}
/**
@@ -397,6 +410,7 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
struct amdgpu_ring *ring = &adev->vce.ring[0];
int i, r;
+
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
uint32_t handle = atomic_read(&adev->vce.handles[i]);
@@ -415,30 +429,41 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
/**
* amdgpu_vce_get_create_msg - generate a VCE create msg
*
- * @adev: amdgpu_device pointer
* @ring: ring we should submit the msg to
* @handle: VCE session handle to use
* @fence: optional fence to return
*
* Open up a stream for HW test
*/
-int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence)
+static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct dma_fence **fence)
{
- const unsigned ib_size_dw = 1024;
+ const unsigned int ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
+ struct amdgpu_ib ib_msg;
struct dma_fence *f = NULL;
- uint64_t dummy;
+ uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
- ib = &job->ibs[0];
+ memset(&ib_msg, 0, sizeof(ib_msg));
+ /* only one gpu page is needed, alloc +1 page to make addr aligned. */
+ r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
+ AMDGPU_IB_POOL_DIRECT,
+ &ib_msg);
+ if (r)
+ goto err;
- dummy = ib->gpu_addr + 1024;
+ ib = &job->ibs[0];
+ /* let addr point to page boundary */
+ addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr);
/* stitch together an VCE create msg */
ib->length_dw = 0;
@@ -470,19 +495,18 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[ib->length_dw++] = 0x00000014; /* len */
ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
- ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
- ib->ptr[ib->length_dw++] = dummy;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
ib->ptr[ib->length_dw++] = 0x00000001;
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ amdgpu_ib_free(&ib_msg, f);
if (r)
goto err;
- amdgpu_job_free(job);
if (fence)
*fence = dma_fence_get(f);
dma_fence_put(f);
@@ -496,23 +520,28 @@ err:
/**
* amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
*
- * @adev: amdgpu_device pointer
* @ring: ring we should submit the msg to
* @handle: VCE session handle to use
+ * @direct: direct or delayed pool
* @fence: optional fence to return
*
* Close up a stream for HW test or if userspace failed to do so
*/
-int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct dma_fence **fence)
+static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ bool direct, struct dma_fence **fence)
{
- const unsigned ib_size_dw = 1024;
+ const unsigned int ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
struct dma_fence *f = NULL;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED,
+ ib_size_dw * 4,
+ direct ? AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED, &job,
+ AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
if (r)
return r;
@@ -539,19 +568,12 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = ib->length_dw; i < ib_size_dw; ++i)
ib->ptr[i] = 0x0;
- if (direct) {
- r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
- job->fence = dma_fence_get(f);
- if (r)
- goto err;
-
- amdgpu_job_free(job);
- } else {
- r = amdgpu_job_submit(job, ring, &ring->adev->vce.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &f);
- if (r)
- goto err;
- }
+ if (direct)
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ else
+ f = amdgpu_job_submit(job);
+ if (r)
+ goto err;
if (fence)
*fence = dma_fence_get(f);
@@ -564,39 +586,93 @@ err:
}
/**
+ * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
+ *
+ * @p: cs parser
+ * @ib: indirect buffer to use
+ * @lo: address of lower dword
+ * @hi: address of higher dword
+ * @size: minimum size
+ * @index: bs/fb index
+ *
+ * Make sure that no BO cross a 4GB boundary.
+ */
+static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p,
+ struct amdgpu_ib *ib, int lo, int hi,
+ unsigned int size, int32_t index)
+{
+ int64_t offset = ((uint64_t)size) * ((int64_t)index);
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_va_mapping *mapping;
+ unsigned int i, fpfn, lpfn;
+ struct amdgpu_bo *bo;
+ uint64_t addr;
+ int r;
+
+ addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
+ ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
+ if (index >= 0) {
+ addr += offset;
+ fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
+ lpfn = 0x100000000ULL >> PAGE_SHIFT;
+ } else {
+ fpfn = 0;
+ lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
+ }
+
+ r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
+ if (r) {
+ DRM_ERROR("Can't find BO for addr 0x%010llx %d %d %d %d\n",
+ addr, lo, hi, size, index);
+ return r;
+ }
+
+ for (i = 0; i < bo->placement.num_placement; ++i) {
+ bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
+ bo->placements[i].lpfn = bo->placements[i].lpfn ?
+ min(bo->placements[i].lpfn, lpfn) : lpfn;
+ }
+ return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+}
+
+
+/**
* amdgpu_vce_cs_reloc - command submission relocation
*
* @p: parser context
+ * @ib: indirect buffer to use
* @lo: address of lower dword
* @hi: address of higher dword
* @size: minimum size
+ * @index: bs/fb index
*
* Patch relocation inside command stream with real buffer address
*/
-static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
- int lo, int hi, unsigned size, uint32_t index)
+static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib,
+ int lo, int hi, unsigned int size, uint32_t index)
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
uint64_t addr;
+ int r;
if (index == 0xffffffff)
index = 0;
- addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
- ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
+ addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
+ ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
addr += ((uint64_t)size) * ((uint64_t)index);
- mapping = amdgpu_cs_find_mapping(p, addr, &bo);
- if (mapping == NULL) {
- DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
+ r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
+ if (r) {
+ DRM_ERROR("Can't find BO for addr 0x%010llx %d %d %d %d\n",
addr, lo, hi, size, index);
- return -EINVAL;
+ return r;
}
if ((addr + (uint64_t)size) >
(mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
- DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n",
+ DRM_ERROR("BO too small for addr 0x%010llx %d %d\n",
addr, lo, hi);
return -EINVAL;
}
@@ -605,8 +681,8 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
addr += amdgpu_bo_gpu_offset(bo);
addr -= ((uint64_t)size) * ((uint64_t)index);
- amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
- amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));
+ amdgpu_ib_set_value(ib, lo, lower_32_bits(addr));
+ amdgpu_ib_set_value(ib, hi, upper_32_bits(addr));
return 0;
}
@@ -619,12 +695,12 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
* @allocated: allocated a new handle?
*
* Validates the handle and return the found session index or -EINVAL
- * we we don't have another free session index.
+ * we don't have another free session index.
*/
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
uint32_t handle, uint32_t *allocated)
{
- unsigned i;
+ unsigned int i;
/* validate the handle */
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
@@ -652,33 +728,32 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
}
/**
- * amdgpu_vce_cs_parse - parse and validate the command stream
+ * amdgpu_vce_ring_parse_cs - parse and validate the command stream
*
* @p: parser context
- *
+ * @job: the job to parse
+ * @ib: the IB to patch
*/
-int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
+int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
{
- struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
- unsigned fb_idx = 0, bs_idx = 0;
+ unsigned int fb_idx = 0, bs_idx = 0;
int session_idx = -1;
uint32_t destroyed = 0;
uint32_t created = 0;
uint32_t allocated = 0;
uint32_t tmp, handle = 0;
- uint32_t *size = &tmp;
- int i, r, idx = 0;
-
- p->job->vm = NULL;
- ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
+ uint32_t dummy = 0xffffffff;
+ uint32_t *size = &dummy;
+ unsigned int idx;
+ int i, r = 0;
- r = amdgpu_cs_sysvm_access_required(p);
- if (r)
- return r;
+ job->vm = NULL;
- while (idx < ib->length_dw) {
- uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
- uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
+ for (idx = 0; idx < ib->length_dw;) {
+ uint32_t len = amdgpu_ib_get_value(ib, idx);
+ uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);
if ((len < 8) || (len & 3)) {
DRM_ERROR("invalid VCE command length (%d)!\n", len);
@@ -687,8 +762,68 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
}
switch (cmd) {
+ case 0x00000002: /* task info */
+ fb_idx = amdgpu_ib_get_value(ib, idx + 6);
+ bs_idx = amdgpu_ib_get_value(ib, idx + 7);
+ break;
+
+ case 0x03000001: /* encode */
+ r = amdgpu_vce_validate_bo(p, ib, idx + 10, idx + 9,
+ 0, 0);
+ if (r)
+ goto out;
+
+ r = amdgpu_vce_validate_bo(p, ib, idx + 12, idx + 11,
+ 0, 0);
+ if (r)
+ goto out;
+ break;
+
+ case 0x05000001: /* context buffer */
+ r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
+ 0, 0);
+ if (r)
+ goto out;
+ break;
+
+ case 0x05000004: /* video bitstream buffer */
+ tmp = amdgpu_ib_get_value(ib, idx + 4);
+ r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
+ tmp, bs_idx);
+ if (r)
+ goto out;
+ break;
+
+ case 0x05000005: /* feedback buffer */
+ r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
+ 4096, fb_idx);
+ if (r)
+ goto out;
+ break;
+
+ case 0x0500000d: /* MV buffer */
+ r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
+ 0, 0);
+ if (r)
+ goto out;
+
+ r = amdgpu_vce_validate_bo(p, ib, idx + 8, idx + 7,
+ 0, 0);
+ if (r)
+ goto out;
+ break;
+ }
+
+ idx += len / 4;
+ }
+
+ for (idx = 0; idx < ib->length_dw;) {
+ uint32_t len = amdgpu_ib_get_value(ib, idx);
+ uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);
+
+ switch (cmd) {
case 0x00000001: /* session */
- handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+ handle = amdgpu_ib_get_value(ib, idx + 2);
session_idx = amdgpu_vce_validate_handle(p, handle,
&allocated);
if (session_idx < 0) {
@@ -699,8 +834,8 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
break;
case 0x00000002: /* task info */
- fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
- bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
+ fb_idx = amdgpu_ib_get_value(ib, idx + 6);
+ bs_idx = amdgpu_ib_get_value(ib, idx + 7);
break;
case 0x01000001: /* create */
@@ -715,8 +850,8 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
goto out;
}
- *size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
- amdgpu_get_ib_value(p, ib_idx, idx + 10) *
+ *size = amdgpu_ib_get_value(ib, idx + 8) *
+ amdgpu_ib_get_value(ib, idx + 10) *
8 * 3 / 2;
break;
@@ -745,12 +880,12 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
break;
case 0x03000001: /* encode */
- r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
+ r = amdgpu_vce_cs_reloc(p, ib, idx + 10, idx + 9,
*size, 0);
if (r)
goto out;
- r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
+ r = amdgpu_vce_cs_reloc(p, ib, idx + 12, idx + 11,
*size / 3, 0);
if (r)
goto out;
@@ -761,27 +896,39 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
break;
case 0x05000001: /* context buffer */
- r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
+ r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
*size * 2, 0);
if (r)
goto out;
break;
case 0x05000004: /* video bitstream buffer */
- tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
- r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
+ tmp = amdgpu_ib_get_value(ib, idx + 4);
+ r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
tmp, bs_idx);
if (r)
goto out;
break;
case 0x05000005: /* feedback buffer */
- r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
+ r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
4096, fb_idx);
if (r)
goto out;
break;
+ case 0x0500000d: /* MV buffer */
+ r = amdgpu_vce_cs_reloc(p, ib, idx + 3,
+ idx + 2, *size, 0);
+ if (r)
+ goto out;
+
+ r = amdgpu_vce_cs_reloc(p, ib, idx + 8,
+ idx + 7, *size / 12, 0);
+ if (r)
+ goto out;
+ break;
+
default:
DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
r = -EINVAL;
@@ -819,14 +966,16 @@ out:
}
/**
- * amdgpu_vce_cs_parse_vm - parse the command stream in VM mode
+ * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
*
* @p: parser context
- *
+ * @job: the job to parse
+ * @ib: the IB to patch
*/
-int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib)
{
- struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
int session_idx = -1;
uint32_t destroyed = 0;
uint32_t created = 0;
@@ -835,8 +984,8 @@ int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
int i, r = 0, idx = 0;
while (idx < ib->length_dw) {
- uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
- uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
+ uint32_t len = amdgpu_ib_get_value(ib, idx);
+ uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);
if ((len < 8) || (len & 3)) {
DRM_ERROR("invalid VCE command length (%d)!\n", len);
@@ -846,7 +995,7 @@ int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
switch (cmd) {
case 0x00000001: /* session */
- handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
+ handle = amdgpu_ib_get_value(ib, idx + 2);
session_idx = amdgpu_vce_validate_handle(p, handle,
&allocated);
if (session_idx < 0) {
@@ -895,7 +1044,6 @@ out:
if (!r) {
/* No error, free all destroyed handle slots */
tmp = destroyed;
- amdgpu_ib_free(p->adev, ib, NULL);
} else {
/* Error during parsing, free all allocated handle slots */
tmp = allocated;
@@ -912,11 +1060,15 @@ out:
* amdgpu_vce_ring_emit_ib - execute indirect buffer
*
* @ring: engine to use
+ * @job: job to retrieve vmid from
* @ib: the IB to execute
+ * @flags: unused
*
*/
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
{
amdgpu_ring_write(ring, VCE_CMD_IB);
amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
@@ -928,11 +1080,13 @@ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
* amdgpu_vce_ring_emit_fence - add a fence command to the ring
*
* @ring: engine to use
- * @fence: the fence
+ * @addr: address
+ * @seq: sequence number
+ * @flags: fence related flags
*
*/
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
- unsigned flags)
+ unsigned int flags)
{
WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
@@ -953,37 +1107,31 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t rptr = amdgpu_ring_get_rptr(ring);
- unsigned i;
+ uint32_t rptr;
+ unsigned int i;
int r, timeout = adev->usec_timeout;
- /* workaround VCE ring test slow issue for sriov*/
+ /* skip ring test for sriov */
if (amdgpu_sriov_vf(adev))
- timeout *= 10;
+ return 0;
r = amdgpu_ring_alloc(ring, 16);
- if (r) {
- DRM_ERROR("amdgpu: vce failed to lock ring %d (%d).\n",
- ring->idx, r);
+ if (r)
return r;
- }
+
+ rptr = amdgpu_ring_get_rptr(ring);
+
amdgpu_ring_write(ring, VCE_CMD_END);
amdgpu_ring_commit(ring);
for (i = 0; i < timeout; i++) {
if (amdgpu_ring_get_rptr(ring) != rptr)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
- if (i < timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed\n",
- ring->idx);
+ if (i >= timeout)
r = -ETIMEDOUT;
- }
return r;
}
@@ -992,6 +1140,7 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
* amdgpu_vce_ring_test_ib - test if VCE IBs are working
*
* @ring: the engine to test on
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
*/
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
@@ -1004,28 +1153,34 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
return 0;
r = amdgpu_vce_get_create_msg(ring, 1, NULL);
- if (r) {
- DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
+ if (r)
goto error;
- }
r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
- if (r) {
- DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
+ if (r)
goto error;
- }
r = dma_fence_wait_timeout(fence, false, timeout);
- if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
+ if (r == 0)
r = -ETIMEDOUT;
- } else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
- } else {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ else if (r > 0)
r = 0;
- }
+
error:
dma_fence_put(fence);
return r;
}
+
+enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring)
+{
+ switch (ring) {
+ case 0:
+ return AMDGPU_RING_PRIO_0;
+ case 1:
+ return AMDGPU_RING_PRIO_1;
+ case 2:
+ return AMDGPU_RING_PRIO_2;
+ default:
+ return AMDGPU_RING_PRIO_0;
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 0a7f18c461e4..6e53f872d084 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -30,9 +30,13 @@
#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
+#define AMDGPU_VCE_FW_53_45 ((53 << 24) | (45 << 16))
+
struct amdgpu_vce {
struct amdgpu_bo *vcpu_bo;
uint64_t gpu_addr;
+ void *cpu_addr;
+ void *saved_bo;
unsigned fw_version;
unsigned fb_version;
atomic_t handles[AMDGPU_MAX_VCE_HANDLES];
@@ -44,24 +48,24 @@ struct amdgpu_vce {
struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
struct amdgpu_irq_src irq;
unsigned harvest_config;
- struct amd_sched_entity entity;
+ struct drm_sched_entity entity;
uint32_t srbm_soft_reset;
unsigned num_rings;
};
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
+int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
-int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct dma_fence **fence);
-int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- bool direct, struct dma_fence **fence);
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
-int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx);
-void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch);
+int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
+int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib);
+void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ struct amdgpu_ib *ib, uint32_t flags);
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
unsigned flags);
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
@@ -70,5 +74,6 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring);
unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring);
+enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
new file mode 100644
index 000000000000..5e0786ea911b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -0,0 +1,1637 @@
+/*
+ * Copyright 2016-2024 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <drm/drm_drv.h>
+
+#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_vcn.h"
+#include "soc15d.h"
+
+/* Firmware Names */
+#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
+#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin"
+#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
+#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin"
+#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin"
+#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin"
+#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin"
+#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin"
+#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin"
+#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin"
+#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin"
+#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin"
+#define FIRMWARE_DIMGREY_CAVEFISH "amdgpu/dimgrey_cavefish_vcn.bin"
+#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
+#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin"
+#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
+#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
+#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin"
+#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin"
+#define FIRMWARE_VCN4_0_3 "amdgpu/vcn_4_0_3.bin"
+#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
+#define FIRMWARE_VCN4_0_5 "amdgpu/vcn_4_0_5.bin"
+#define FIRMWARE_VCN4_0_6 "amdgpu/vcn_4_0_6.bin"
+#define FIRMWARE_VCN4_0_6_1 "amdgpu/vcn_4_0_6_1.bin"
+#define FIRMWARE_VCN5_0_0 "amdgpu/vcn_5_0_0.bin"
+#define FIRMWARE_VCN5_0_1 "amdgpu/vcn_5_0_1.bin"
+
+MODULE_FIRMWARE(FIRMWARE_RAVEN);
+MODULE_FIRMWARE(FIRMWARE_PICASSO);
+MODULE_FIRMWARE(FIRMWARE_RAVEN2);
+MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
+MODULE_FIRMWARE(FIRMWARE_RENOIR);
+MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
+MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
+MODULE_FIRMWARE(FIRMWARE_NAVI10);
+MODULE_FIRMWARE(FIRMWARE_NAVI14);
+MODULE_FIRMWARE(FIRMWARE_NAVI12);
+MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID);
+MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER);
+MODULE_FIRMWARE(FIRMWARE_VANGOGH);
+MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH);
+MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY);
+MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP);
+MODULE_FIRMWARE(FIRMWARE_VCN_3_1_2);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_0);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_2);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
+MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
+MODULE_FIRMWARE(FIRMWARE_VCN5_0_1);
+
+static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev);
+
+int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i)
+{
+ char ucode_prefix[25];
+ int r;
+
+ adev->vcn.inst[i].adev = adev;
+ adev->vcn.inst[i].inst = i;
+ amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+ if (i != 0 && adev->vcn.per_inst_fw) {
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[i].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_%d.bin", ucode_prefix, i);
+ if (r)
+ amdgpu_ucode_release(&adev->vcn.inst[i].fw);
+ } else {
+ if (!adev->vcn.inst[0].fw) {
+ r = amdgpu_ucode_request(adev, &adev->vcn.inst[0].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", ucode_prefix);
+ if (r)
+ amdgpu_ucode_release(&adev->vcn.inst[0].fw);
+ } else {
+ r = 0;
+ }
+ adev->vcn.inst[i].fw = adev->vcn.inst[0].fw;
+ }
+
+ return r;
+}
+
+int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i)
+{
+ unsigned long bo_size;
+ const struct common_firmware_header *hdr;
+ unsigned char fw_check;
+ unsigned int fw_shared_size, log_offset;
+ int r;
+
+ mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
+ mutex_init(&adev->vcn.inst[i].vcn_pg_lock);
+ mutex_init(&adev->vcn.inst[i].engine_reset_mutex);
+ atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0);
+ INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler);
+ atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.inst[i].indirect_sram = true;
+
+ /*
+ * Some Steam Deck's BIOS versions are incompatible with the
+ * indirect SRAM mode, leading to amdgpu being unable to get
+ * properly probed (and even potentially crashing the kernel).
+ * Hence, check for these versions here - notice this is
+ * restricted to Vangogh (Deck's APU).
+ */
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(3, 0, 2)) {
+ const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION);
+
+ if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) ||
+ !strncmp("F7A0114", bios_ver, 7))) {
+ adev->vcn.inst[i].indirect_sram = false;
+ dev_info(adev->dev,
+ "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver);
+ }
+ }
+
+ /* from vcn4 and above, only unified queue is used */
+ adev->vcn.inst[i].using_unified_queue =
+ amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0);
+
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
+ adev->vcn.inst[i].fw_version = le32_to_cpu(hdr->ucode_version);
+ adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
+
+ /* Bits 20-23 are the encode major and non-zero for the new naming convention.
+ * This field is part of version minor and DRM_DISABLED_FLAG in old naming
+ * convention. Since the latest version minor is 0x5B and DRM_DISABLED_FLAG
+ * is zero in old naming convention, this field is always zero so far.
+ * These four bits are used to tell which naming convention is present.
+ */
+ fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
+ if (fw_check) {
+ unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
+
+ fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
+ enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
+ enc_major = fw_check;
+ dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
+ vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
+ dev_info(adev->dev,
+ "[VCN instance %d] Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
+ i, enc_major, enc_minor, dec_ver, vep, fw_rev);
+ } else {
+ unsigned int version_major, version_minor, family_id;
+
+ family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+ version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+ version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+ dev_info(adev->dev, "[VCN instance %d] Found VCN firmware Version: %u.%u Family ID: %u\n",
+ i, version_major, version_minor, family_id);
+ }
+
+ bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
+
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(5, 0, 0)) {
+ fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared));
+ log_offset = offsetof(struct amdgpu_vcn5_fw_shared, fw_log);
+ } else if (amdgpu_ip_version(adev, UVD_HWIP, 0) >= IP_VERSION(4, 0, 0)) {
+ fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared));
+ log_offset = offsetof(struct amdgpu_vcn4_fw_shared, fw_log);
+ } else {
+ fw_shared_size = AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared));
+ log_offset = offsetof(struct amdgpu_fw_shared, fw_log);
+ }
+
+ bo_size += fw_shared_size;
+
+ if (amdgpu_vcnfw_log)
+ bo_size += AMDGPU_VCNFW_LOG_SIZE;
+
+ r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vcn.inst[i].vcpu_bo,
+ &adev->vcn.inst[i].gpu_addr,
+ &adev->vcn.inst[i].cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
+ return r;
+ }
+
+ adev->vcn.inst[i].fw_shared.cpu_addr = adev->vcn.inst[i].cpu_addr +
+ bo_size - fw_shared_size;
+ adev->vcn.inst[i].fw_shared.gpu_addr = adev->vcn.inst[i].gpu_addr +
+ bo_size - fw_shared_size;
+
+ adev->vcn.inst[i].fw_shared.mem_size = fw_shared_size;
+
+ if (amdgpu_vcnfw_log) {
+ adev->vcn.inst[i].fw_shared.cpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
+ adev->vcn.inst[i].fw_shared.gpu_addr -= AMDGPU_VCNFW_LOG_SIZE;
+ adev->vcn.inst[i].fw_shared.log_offset = log_offset;
+ }
+
+ if (adev->vcn.inst[i].indirect_sram) {
+ r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr,
+ &adev->vcn.inst[i].dpg_sram_cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i)
+{
+ int j;
+
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
+
+ amdgpu_bo_free_kernel(
+ &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr,
+ (void **)&adev->vcn.inst[i].dpg_sram_cpu_addr);
+
+ kvfree(adev->vcn.inst[i].saved_bo);
+
+ amdgpu_bo_free_kernel(&adev->vcn.inst[i].vcpu_bo,
+ &adev->vcn.inst[i].gpu_addr,
+ (void **)&adev->vcn.inst[i].cpu_addr);
+
+ amdgpu_ring_fini(&adev->vcn.inst[i].ring_dec);
+
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
+ amdgpu_ring_fini(&adev->vcn.inst[i].ring_enc[j]);
+
+ if (adev->vcn.per_inst_fw) {
+ amdgpu_ucode_release(&adev->vcn.inst[i].fw);
+ } else {
+ amdgpu_ucode_release(&adev->vcn.inst[0].fw);
+ adev->vcn.inst[i].fw = NULL;
+ }
+
+ if (adev->vcn.reg_list)
+ amdgpu_vcn_reg_dump_fini(adev);
+
+ mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock);
+ mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround);
+}
+
+bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance)
+{
+ bool ret = false;
+ int vcn_config = adev->vcn.inst[vcn_instance].vcn_config;
+
+ if ((type == VCN_ENCODE_RING) && (vcn_config & VCN_BLOCK_ENCODE_DISABLE_MASK))
+ ret = true;
+ else if ((type == VCN_DECODE_RING) && (vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK))
+ ret = true;
+ else if ((type == VCN_UNIFIED_RING) && (vcn_config & VCN_BLOCK_QUEUE_DISABLE_MASK))
+ ret = true;
+
+ return ret;
+}
+
+static int amdgpu_vcn_save_vcpu_bo_inst(struct amdgpu_device *adev, int i)
+{
+ unsigned int size;
+ void *ptr;
+ int idx;
+
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
+ if (adev->vcn.inst[i].vcpu_bo == NULL)
+ return 0;
+
+ size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
+ ptr = adev->vcn.inst[i].cpu_addr;
+
+ adev->vcn.inst[i].saved_bo = kvmalloc(size, GFP_KERNEL);
+ if (!adev->vcn.inst[i].saved_bo)
+ return -ENOMEM;
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
+ drm_dev_exit(idx);
+ }
+
+ return 0;
+}
+
+int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev)
+{
+ int ret, i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ret = amdgpu_vcn_save_vcpu_bo_inst(adev, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i)
+{
+ bool in_ras_intr = amdgpu_ras_intr_triggered();
+
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
+
+ /* err_event_athub and dpc recovery will corrupt VCPU buffer, so we need to
+ * restore fw data and clear buffer in amdgpu_vcn_resume() */
+ if (in_ras_intr || adev->pcie_reset_ctx.in_link_reset)
+ return 0;
+
+ return amdgpu_vcn_save_vcpu_bo_inst(adev, i);
+}
+
+int amdgpu_vcn_resume(struct amdgpu_device *adev, int i)
+{
+ unsigned int size;
+ void *ptr;
+ int idx;
+
+ if (adev->vcn.harvest_config & (1 << i))
+ return 0;
+ if (adev->vcn.inst[i].vcpu_bo == NULL)
+ return -EINVAL;
+
+ size = amdgpu_bo_size(adev->vcn.inst[i].vcpu_bo);
+ ptr = adev->vcn.inst[i].cpu_addr;
+
+ if (adev->vcn.inst[i].saved_bo != NULL) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
+ drm_dev_exit(idx);
+ }
+ kvfree(adev->vcn.inst[i].saved_bo);
+ adev->vcn.inst[i].saved_bo = NULL;
+ } else {
+ const struct common_firmware_header *hdr;
+ unsigned int offset;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ memcpy_toio(adev->vcn.inst[i].cpu_addr,
+ adev->vcn.inst[i].fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
+ drm_dev_exit(idx);
+ }
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ }
+ memset_io(ptr, 0, size);
+ }
+
+ return 0;
+}
+
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev)
+{
+ int r;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+
+ if (adev->vcn.workload_profile_active) {
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+ return;
+ }
+ r = amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_VIDEO,
+ true);
+ if (r)
+ dev_warn(adev->dev,
+ "(%d) failed to enable video power profile mode\n", r);
+ else
+ adev->vcn.workload_profile_active = true;
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev)
+{
+ bool pg = true;
+ int r, i;
+
+ mutex_lock(&adev->vcn.workload_profile_mutex);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.inst[i].cur_state != AMD_PG_STATE_GATE) {
+ pg = false;
+ break;
+ }
+ }
+
+ if (pg) {
+ r = amdgpu_dpm_switch_power_profile(
+ adev, PP_SMC_POWER_PROFILE_VIDEO, false);
+ if (r)
+ dev_warn(
+ adev->dev,
+ "(%d) failed to disable video power profile mode\n",
+ r);
+ else
+ adev->vcn.workload_profile_active = false;
+ }
+
+ mutex_unlock(&adev->vcn.workload_profile_mutex);
+}
+
+static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_vcn_inst *vcn_inst =
+ container_of(work, struct amdgpu_vcn_inst, idle_work.work);
+ struct amdgpu_device *adev = vcn_inst->adev;
+ unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};
+ unsigned int i = vcn_inst->inst, j;
+
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
+
+ for (j = 0; j < adev->vcn.inst[i].num_enc_rings; ++j)
+ fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[j]);
+
+ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ !adev->vcn.inst[i].using_unified_queue) {
+ struct dpg_pause_state new_state;
+
+ if (fence[i] ||
+ unlikely(atomic_read(&vcn_inst->dpg_enc_submission_cnt)))
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+
+ adev->vcn.inst[i].pause_dpg_mode(vcn_inst, &new_state);
+ }
+
+ fence[i] += amdgpu_fence_count_emitted(&vcn_inst->ring_dec);
+ fences += fence[i];
+
+ if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {
+ mutex_lock(&vcn_inst->vcn_pg_lock);
+ vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_GATE);
+ mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_put_profile(adev);
+
+ } else {
+ schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
+ }
+}
+
+void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vcn_inst *vcn_inst = &adev->vcn.inst[ring->me];
+
+ atomic_inc(&vcn_inst->total_submission_cnt);
+
+ cancel_delayed_work_sync(&vcn_inst->idle_work);
+
+ mutex_lock(&vcn_inst->vcn_pg_lock);
+ vcn_inst->set_pg_state(vcn_inst, AMD_PG_STATE_UNGATE);
+
+ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ !vcn_inst->using_unified_queue) {
+ struct dpg_pause_state new_state;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
+ atomic_inc(&vcn_inst->dpg_enc_submission_cnt);
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ } else {
+ unsigned int fences = 0;
+ unsigned int i;
+
+ for (i = 0; i < vcn_inst->num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[i]);
+
+ if (fences || atomic_read(&vcn_inst->dpg_enc_submission_cnt))
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+ }
+
+ vcn_inst->pause_dpg_mode(vcn_inst, &new_state);
+ }
+ mutex_unlock(&vcn_inst->vcn_pg_lock);
+ amdgpu_vcn_get_profile(adev);
+}
+
+void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ /* Only set DPG pause for VCN3 or below, VCN4 and above will be handled by FW */
+ if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC &&
+ !adev->vcn.inst[ring->me].using_unified_queue)
+ atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+
+ atomic_dec(&ring->adev->vcn.inst[ring->me].total_submission_cnt);
+
+ schedule_delayed_work(&ring->adev->vcn.inst[ring->me].idle_work,
+ VCN_IDLE_TIMEOUT);
+}
+
+int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t tmp = 0;
+ unsigned int i;
+ int r;
+
+ /* VCN in SRIOV does not support direct register read/write */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
+ r = amdgpu_ring_alloc(ring, 3);
+ if (r)
+ return r;
+ amdgpu_ring_write(ring, PACKET0(adev->vcn.inst[ring->me].internal.scratch9, 0));
+ amdgpu_ring_write(ring, 0xDEADBEEF);
+ amdgpu_ring_commit(ring);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(adev->vcn.inst[ring->me].external.scratch9);
+ if (tmp == 0xDEADBEEF)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+ return r;
+}
+
+int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t rptr;
+ unsigned int i;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ r = amdgpu_ring_alloc(ring, 16);
+ if (r)
+ return r;
+
+ rptr = amdgpu_ring_get_rptr(ring);
+
+ amdgpu_ring_write(ring, VCN_DEC_SW_CMD_END);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (amdgpu_ring_get_rptr(ring) != rptr)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+ return r;
+}
+
+static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib_msg,
+ struct dma_fence **fence)
+{
+ u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
+ struct amdgpu_device *adev = ring->adev;
+ struct dma_fence *f = NULL;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ int i, r;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+ 64, AMDGPU_IB_POOL_DIRECT,
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
+ if (r)
+ goto err;
+
+ ib = &job->ibs[0];
+ ib->ptr[0] = PACKET0(adev->vcn.inst[ring->me].internal.data0, 0);
+ ib->ptr[1] = addr;
+ ib->ptr[2] = PACKET0(adev->vcn.inst[ring->me].internal.data1, 0);
+ ib->ptr[3] = addr >> 32;
+ ib->ptr[4] = PACKET0(adev->vcn.inst[ring->me].internal.cmd, 0);
+ ib->ptr[5] = 0;
+ for (i = 6; i < 16; i += 2) {
+ ib->ptr[i] = PACKET0(adev->vcn.inst[ring->me].internal.nop, 0);
+ ib->ptr[i+1] = 0;
+ }
+ ib->length_dw = 16;
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err_free;
+
+ amdgpu_ib_free(ib_msg, f);
+
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+
+ return 0;
+
+err_free:
+ amdgpu_job_free(job);
+err:
+ amdgpu_ib_free(ib_msg, f);
+ return r;
+}
+
+static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_ib *ib)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t *msg;
+ int r, i;
+
+ memset(ib, 0, sizeof(*ib));
+ r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
+ AMDGPU_IB_POOL_DIRECT,
+ ib);
+ if (r)
+ return r;
+
+ msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
+ msg[0] = cpu_to_le32(0x00000028);
+ msg[1] = cpu_to_le32(0x00000038);
+ msg[2] = cpu_to_le32(0x00000001);
+ msg[3] = cpu_to_le32(0x00000000);
+ msg[4] = cpu_to_le32(handle);
+ msg[5] = cpu_to_le32(0x00000000);
+ msg[6] = cpu_to_le32(0x00000001);
+ msg[7] = cpu_to_le32(0x00000028);
+ msg[8] = cpu_to_le32(0x00000010);
+ msg[9] = cpu_to_le32(0x00000000);
+ msg[10] = cpu_to_le32(0x00000007);
+ msg[11] = cpu_to_le32(0x00000000);
+ msg[12] = cpu_to_le32(0x00000780);
+ msg[13] = cpu_to_le32(0x00000440);
+ for (i = 14; i < 1024; ++i)
+ msg[i] = cpu_to_le32(0x0);
+
+ return 0;
+}
+
+static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_ib *ib)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t *msg;
+ int r, i;
+
+ memset(ib, 0, sizeof(*ib));
+ r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
+ AMDGPU_IB_POOL_DIRECT,
+ ib);
+ if (r)
+ return r;
+
+ msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
+ msg[0] = cpu_to_le32(0x00000028);
+ msg[1] = cpu_to_le32(0x00000018);
+ msg[2] = cpu_to_le32(0x00000000);
+ msg[3] = cpu_to_le32(0x00000002);
+ msg[4] = cpu_to_le32(handle);
+ msg[5] = cpu_to_le32(0x00000000);
+ for (i = 6; i < 1024; ++i)
+ msg[i] = cpu_to_le32(0x0);
+
+ return 0;
+}
+
+int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct dma_fence *fence = NULL;
+ struct amdgpu_ib ib;
+ long r;
+
+ r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
+ if (r)
+ goto error;
+
+ r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
+ if (r)
+ goto error;
+ r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
+ if (r)
+ goto error;
+
+ r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
+ if (r)
+ goto error;
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ if (r == 0)
+ r = -ETIMEDOUT;
+ else if (r > 0)
+ r = 0;
+
+ dma_fence_put(fence);
+error:
+ return r;
+}
+
+static uint32_t *amdgpu_vcn_unified_ring_ib_header(struct amdgpu_ib *ib,
+ uint32_t ib_pack_in_dw, bool enc)
+{
+ uint32_t *ib_checksum;
+
+ ib->ptr[ib->length_dw++] = 0x00000010; /* single queue checksum */
+ ib->ptr[ib->length_dw++] = 0x30000002;
+ ib_checksum = &ib->ptr[ib->length_dw++];
+ ib->ptr[ib->length_dw++] = ib_pack_in_dw;
+
+ ib->ptr[ib->length_dw++] = 0x00000010; /* engine info */
+ ib->ptr[ib->length_dw++] = 0x30000001;
+ ib->ptr[ib->length_dw++] = enc ? 0x2 : 0x3;
+ ib->ptr[ib->length_dw++] = ib_pack_in_dw * sizeof(uint32_t);
+
+ return ib_checksum;
+}
+
+static void amdgpu_vcn_unified_ring_ib_checksum(uint32_t **ib_checksum,
+ uint32_t ib_pack_in_dw)
+{
+ uint32_t i;
+ uint32_t checksum = 0;
+
+ for (i = 0; i < ib_pack_in_dw; i++)
+ checksum += *(*ib_checksum + 2 + i);
+
+ **ib_checksum = checksum;
+}
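
The two helpers above frame a unified-queue IB: an 8-dword header (a checksum packet followed by an engine-info packet) is written first, and the checksum slot is later filled with the sum of ib_pack_in_dw dwords starting two slots past it, i.e. from the engine-info packet onward. A self-contained sketch of the same arithmetic on a plain array (the constants mirror the helpers; the two payload dwords are made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ib[32] = { 0 };
	uint32_t len = 0, ib_pack_in_dw = 6, sum = 0, i;
	uint32_t *csum;

	ib[len++] = 0x00000010;		/* single queue checksum packet */
	ib[len++] = 0x30000002;
	csum = &ib[len++];		/* checksum slot, filled in below */
	ib[len++] = ib_pack_in_dw;

	ib[len++] = 0x00000010;		/* engine info packet */
	ib[len++] = 0x30000001;
	ib[len++] = 0x3;		/* 0x2 = encode, 0x3 = decode */
	ib[len++] = ib_pack_in_dw * sizeof(uint32_t);

	ib[len++] = 0x11111111;		/* toy payload dwords */
	ib[len++] = 0x22222222;

	for (i = 0; i < ib_pack_in_dw; i++)	/* sum starts at csum + 2 */
		sum += *(csum + 2 + i);
	*csum = sum;

	printf("checksum over %u dwords: 0x%08x\n", ib_pack_in_dw, *csum);
	return 0;
}
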
+
+static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
+ struct amdgpu_ib *ib_msg,
+ struct dma_fence **fence)
+{
+ struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
+ unsigned int ib_size_dw = 64;
+ struct amdgpu_device *adev = ring->adev;
+ struct dma_fence *f = NULL;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
+ uint32_t *ib_checksum;
+ uint32_t ib_pack_in_dw;
+ int i, r;
+
+ if (adev->vcn.inst[ring->me].using_unified_queue)
+ ib_size_dw += 8;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+ ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
+ if (r)
+ goto err;
+
+ ib = &job->ibs[0];
+ ib->length_dw = 0;
+
+ /* single queue headers */
+ if (adev->vcn.inst[ring->me].using_unified_queue) {
+ ib_pack_in_dw = sizeof(struct amdgpu_vcn_decode_buffer) / sizeof(uint32_t)
+ + 4 + 2; /* engine info + decoding ib in dw */
+ ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, ib_pack_in_dw, false);
+ }
+
+ ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
+ ib->ptr[ib->length_dw++] = cpu_to_le32(AMDGPU_VCN_IB_FLAG_DECODE_BUFFER);
+ decode_buffer = (struct amdgpu_vcn_decode_buffer *)&(ib->ptr[ib->length_dw]);
+ ib->length_dw += sizeof(struct amdgpu_vcn_decode_buffer) / 4;
+ memset(decode_buffer, 0, sizeof(struct amdgpu_vcn_decode_buffer));
+
+ decode_buffer->valid_buf_flag |= cpu_to_le32(AMDGPU_VCN_CMD_FLAG_MSG_BUFFER);
+ decode_buffer->msg_buffer_address_hi = cpu_to_le32(addr >> 32);
+ decode_buffer->msg_buffer_address_lo = cpu_to_le32(addr);
+
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+ if (adev->vcn.inst[ring->me].using_unified_queue)
+ amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, ib_pack_in_dw);
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err_free;
+
+ amdgpu_ib_free(ib_msg, f);
+
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+
+ return 0;
+
+err_free:
+ amdgpu_job_free(job);
+err:
+ amdgpu_ib_free(ib_msg, f);
+ return r;
+}
+
+int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct dma_fence *fence = NULL;
+ struct amdgpu_ib ib;
+ long r;
+
+ r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
+ if (r)
+ goto error;
+
+ r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
+ if (r)
+ goto error;
+ r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
+ if (r)
+ goto error;
+
+ r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
+ if (r)
+ goto error;
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ if (r == 0)
+ r = -ETIMEDOUT;
+ else if (r > 0)
+ r = 0;
+
+ dma_fence_put(fence);
+error:
+ return r;
+}
+
+int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t rptr;
+ unsigned int i;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ r = amdgpu_ring_alloc(ring, 16);
+ if (r)
+ return r;
+
+ rptr = amdgpu_ring_get_rptr(ring);
+
+ amdgpu_ring_write(ring, VCN_ENC_CMD_END);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (amdgpu_ring_get_rptr(ring) != rptr)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+
+ return r;
+}
+
+static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_ib *ib_msg,
+ struct dma_fence **fence)
+{
+ unsigned int ib_size_dw = 16;
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ uint32_t *ib_checksum = NULL;
+ uint64_t addr;
+ int i, r;
+
+ if (adev->vcn.inst[ring->me].using_unified_queue)
+ ib_size_dw += 8;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+ ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+ addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
+
+ ib->length_dw = 0;
+
+ if (adev->vcn.inst[ring->me].using_unified_queue)
+ ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
+
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
+ ib->ptr[ib->length_dw++] = handle;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+ ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
+ ib->ptr[ib->length_dw++] = 0x0000001c;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+
+ ib->ptr[ib->length_dw++] = 0x00000008;
+ ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */
+
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+ if (adev->vcn.inst[ring->me].using_unified_queue)
+ amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+ struct amdgpu_ib *ib_msg,
+ struct dma_fence **fence)
+{
+ unsigned int ib_size_dw = 16;
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ struct amdgpu_ib *ib;
+ struct dma_fence *f = NULL;
+ uint32_t *ib_checksum = NULL;
+ uint64_t addr;
+ int i, r;
+
+ if (adev->vcn.inst[ring->me].using_unified_queue)
+ ib_size_dw += 8;
+
+ r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
+ ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+ &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
+ if (r)
+ return r;
+
+ ib = &job->ibs[0];
+ addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
+
+ ib->length_dw = 0;
+
+ if (adev->vcn.inst[ring->me].using_unified_queue)
+ ib_checksum = amdgpu_vcn_unified_ring_ib_header(ib, 0x11, true);
+
+ ib->ptr[ib->length_dw++] = 0x00000018;
+ ib->ptr[ib->length_dw++] = 0x00000001;
+ ib->ptr[ib->length_dw++] = handle;
+ ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+ ib->ptr[ib->length_dw++] = addr;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+
+ ib->ptr[ib->length_dw++] = 0x00000014;
+ ib->ptr[ib->length_dw++] = 0x00000002;
+ ib->ptr[ib->length_dw++] = 0x0000001c;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+ ib->ptr[ib->length_dw++] = 0x00000000;
+
+ ib->ptr[ib->length_dw++] = 0x00000008;
+ ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */
+
+ for (i = ib->length_dw; i < ib_size_dw; ++i)
+ ib->ptr[i] = 0x0;
+
+ if (adev->vcn.inst[ring->me].using_unified_queue)
+ amdgpu_vcn_unified_ring_ib_checksum(&ib_checksum, 0x11);
+
+ r = amdgpu_job_submit_direct(job, ring, &f);
+ if (r)
+ goto err;
+
+ if (fence)
+ *fence = dma_fence_get(f);
+ dma_fence_put(f);
+
+ return 0;
+
+err:
+ amdgpu_job_free(job);
+ return r;
+}
+
+int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct dma_fence *fence = NULL;
+ struct amdgpu_ib ib;
+ long r;
+
+ memset(&ib, 0, sizeof(ib));
+ r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_IB_POOL_DIRECT,
+ &ib);
+ if (r)
+ return r;
+
+ r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
+ if (r)
+ goto error;
+
+ r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
+ if (r)
+ goto error;
+
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ if (r == 0)
+ r = -ETIMEDOUT;
+ else if (r > 0)
+ r = 0;
+
+error:
+ amdgpu_ib_free(&ib, fence);
+ dma_fence_put(fence);
+
+ return r;
+}
+
+int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ long r;
+
+ if ((amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(4, 0, 3)) &&
+ (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(5, 0, 1))) {
+ r = amdgpu_vcn_enc_ring_test_ib(ring, timeout);
+ if (r)
+ goto error;
+ }
+
+ r = amdgpu_vcn_dec_sw_ring_test_ib(ring, timeout);
+
+error:
+ return r;
+}
+
+enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
+{
+ switch (ring) {
+ case 0:
+ return AMDGPU_RING_PRIO_0;
+ case 1:
+ return AMDGPU_RING_PRIO_1;
+ case 2:
+ return AMDGPU_RING_PRIO_2;
+ default:
+ return AMDGPU_RING_PRIO_0;
+ }
+}
+
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i)
+{
+ unsigned int idx;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ const struct common_firmware_header *hdr;
+
+ if (adev->vcn.harvest_config & (1 << i))
+ return;
+
+ if ((amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 3) ||
+ amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(5, 0, 1))
+ && (i > 0))
+ return;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.inst[i].fw->data;
+ /* currently only 2 FW instances are supported */
+ if (i >= 2) {
+ dev_info(adev->dev, "More than 2 VCN FW instances!\n");
+ return;
+ }
+ idx = AMDGPU_UCODE_ID_VCN + i;
+ adev->firmware.ucode[idx].ucode_id = idx;
+ adev->firmware.ucode[idx].fw = adev->vcn.inst[i].fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+ }
+}
+
+/*
+ * debugfs interface for reading the vcn firmware log buffer.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static ssize_t amdgpu_debugfs_vcn_fwlog_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_vcn_inst *vcn;
+ void *log_buf;
+ struct amdgpu_vcn_fwlog *plog;
+ unsigned int read_pos, write_pos, available, i, read_bytes = 0;
+ unsigned int read_num[2] = {0};
+
+ vcn = file_inode(f)->i_private;
+ if (!vcn)
+ return -ENODEV;
+
+ if (!vcn->fw_shared.cpu_addr || !amdgpu_vcnfw_log)
+ return -EFAULT;
+
+ log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
+
+ plog = (struct amdgpu_vcn_fwlog *)log_buf;
+ read_pos = plog->rptr;
+ write_pos = plog->wptr;
+
+ if (read_pos > AMDGPU_VCNFW_LOG_SIZE || write_pos > AMDGPU_VCNFW_LOG_SIZE)
+ return -EFAULT;
+
+ if (!size || (read_pos == write_pos))
+ return 0;
+
+ if (write_pos > read_pos) {
+ available = write_pos - read_pos;
+ read_num[0] = min_t(size_t, size, available);
+ } else {
+ read_num[0] = AMDGPU_VCNFW_LOG_SIZE - read_pos;
+ available = read_num[0] + write_pos - plog->header_size;
+ if (size > available)
+ read_num[1] = write_pos - plog->header_size;
+ else if (size > read_num[0])
+ read_num[1] = size - read_num[0];
+ else
+ read_num[0] = size;
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (read_num[i]) {
+ if (read_pos == AMDGPU_VCNFW_LOG_SIZE)
+ read_pos = plog->header_size;
+ if (read_num[i] == copy_to_user((buf + read_bytes),
+ (log_buf + read_pos), read_num[i]))
+ return -EFAULT;
+
+ read_bytes += read_num[i];
+ read_pos += read_num[i];
+ }
+ }
+
+ plog->rptr = read_pos;
+ *pos += read_bytes;
+ return read_bytes;
+}
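
amdgpu_debugfs_vcn_fwlog_read() drains a ring buffer whose first header_size bytes hold the amdgpu_vcn_fwlog header, so a read that crosses the end of the buffer is split into a tail copy and a wrapped head copy. A simplified, loop-based userspace equivalent of that split (toy sizes, not the real AMDGPU_VCNFW_LOG_SIZE):

#include <stdio.h>
#include <string.h>

#define LOG_SIZE	32	/* stand-in for AMDGPU_VCNFW_LOG_SIZE */
#define HDR_SIZE	8	/* stand-in for plog->header_size */

/* Copy everything between rptr and wptr into out, splitting the copy in
 * two when the writer has already wrapped past the end of the buffer.
 */
static size_t fwlog_read(const char *log, unsigned int *rptr,
			 unsigned int wptr, char *out, size_t out_size)
{
	size_t copied = 0, chunk;

	while (*rptr != wptr && copied < out_size) {
		if (*rptr == LOG_SIZE)		/* wrap back past the header */
			*rptr = HDR_SIZE;
		chunk = (wptr > *rptr) ? wptr - *rptr : LOG_SIZE - *rptr;
		if (chunk > out_size - copied)
			chunk = out_size - copied;
		memcpy(out + copied, log + *rptr, chunk);
		copied += chunk;
		*rptr += chunk;
	}
	return copied;
}

int main(void)
{
	char log[LOG_SIZE] = { 0 }, out[64];
	unsigned int rptr = 28, wptr = HDR_SIZE + 3;	/* writer has wrapped */

	memcpy(log + 28, "abcd", 4);		/* tail of the buffer */
	memcpy(log + HDR_SIZE, "efg", 3);	/* wrapped head of the data */

	printf("read %zu bytes\n",
	       fwlog_read(log, &rptr, wptr, out, sizeof(out)));
	return 0;
}
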
+
+static const struct file_operations amdgpu_debugfs_vcnfwlog_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_debugfs_vcn_fwlog_read,
+ .llseek = default_llseek
+};
+#endif
+
+void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev, uint8_t i,
+ struct amdgpu_vcn_inst *vcn)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ sprintf(name, "amdgpu_vcn_%d_fwlog", i);
+ debugfs_create_file_size(name, S_IFREG | 0444, root, vcn,
+ &amdgpu_debugfs_vcnfwlog_fops,
+ AMDGPU_VCNFW_LOG_SIZE);
+#endif
+}
+
+void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn)
+{
+#if defined(CONFIG_DEBUG_FS)
+ uint32_t *flag = vcn->fw_shared.cpu_addr;
+ void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size;
+ uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size;
+ struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr;
+ struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr
+ + vcn->fw_shared.log_offset;
+ *flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG);
+ fw_log->is_enabled = 1;
+ fw_log->addr_lo = cpu_to_le32(fw_log_gpu_addr & 0xFFFFFFFF);
+ fw_log->addr_hi = cpu_to_le32(fw_log_gpu_addr >> 32);
+ fw_log->size = cpu_to_le32(AMDGPU_VCNFW_LOG_SIZE);
+
+ log_buf->header_size = sizeof(struct amdgpu_vcn_fwlog);
+ log_buf->buffer_size = AMDGPU_VCNFW_LOG_SIZE;
+ log_buf->rptr = log_buf->header_size;
+ log_buf->wptr = log_buf->header_size;
+ log_buf->wrapped = 0;
+#endif
+}
+
+int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->vcn.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ if (!amdgpu_sriov_vf(adev)) {
+ ih_data.head = *ras_if;
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ } else {
+ if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
+ adev->virt.ops->ras_poison_handler(adev, ras_if->block);
+ else
+ dev_warn(adev->dev,
+ "No ras_poison_handler interface in SRIOV for VCN!\n");
+ }
+
+ return 0;
+}
+
+int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r, i;
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i) ||
+ !adev->vcn.inst[i].ras_poison_irq.funcs)
+ continue;
+
+ r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
+ if (r)
+ goto late_fini;
+ }
+ }
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+ return r;
+}
+
+int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_vcn_ras *ras;
+
+ if (!adev->vcn.ras)
+ return 0;
+
+ ras = adev->vcn.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register vcn ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "vcn");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__VCN;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__POISON;
+ adev->vcn.ras_if = &ras->ras_block.ras_comm;
+
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
+
+ return 0;
+}
+
+int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
+ enum AMDGPU_UCODE_ID ucode_id)
+{
+ struct amdgpu_firmware_info ucode = {
+ .ucode_id = (ucode_id ? ucode_id :
+ (inst_idx ? AMDGPU_UCODE_ID_VCN1_RAM :
+ AMDGPU_UCODE_ID_VCN0_RAM)),
+ .mc_addr = adev->vcn.inst[inst_idx].dpg_sram_gpu_addr,
+ .ucode_size = ((uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_curr_addr -
+ (uintptr_t)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr),
+ };
+
+ return psp_execute_ip_fw_load(&adev->psp, &ucode);
+}
+
+static ssize_t amdgpu_get_vcn_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->vcn.supported_reset);
+}
+
+static DEVICE_ATTR(vcn_reset_mask, 0444,
+ amdgpu_get_vcn_reset_mask, NULL);
+
+int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->vcn.num_vcn_inst) {
+ r = device_create_file(adev->dev, &dev_attr_vcn_reset_mask);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+ if (adev->dev->kobj.sd) {
+ if (adev->vcn.num_vcn_inst)
+ device_remove_file(adev->dev, &dev_attr_vcn_reset_mask);
+ }
+}
+
+/*
+ * debugfs to enable/disable vcn job submission to a specific core or
+ * instance. It is created only if the queue type is unified.
+ */
+#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_vcn_sched_mask_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+
+ mask = (1ULL << adev->vcn.num_vcn_inst) - 1;
+ if ((val & mask) == 0)
+ return -EINVAL;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ if (val & (1ULL << i))
+ ring->sched.ready = true;
+ else
+ ring->sched.ready = false;
+ }
+ /* publish the sched.ready updates so they take effect immediately across SMP */
+ smp_rmb();
+ return 0;
+}
+
+static int amdgpu_debugfs_vcn_sched_mask_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ u32 i;
+ u64 mask = 0;
+ struct amdgpu_ring *ring;
+
+ if (!adev)
+ return -ENODEV;
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ if (ring->sched.ready)
+ mask |= 1ULL << i;
+ }
+ *val = mask;
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_vcn_sched_mask_fops,
+ amdgpu_debugfs_vcn_sched_mask_get,
+ amdgpu_debugfs_vcn_sched_mask_set, "%llx\n");
+#endif
+
+void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+ char name[32];
+
+ if (adev->vcn.num_vcn_inst <= 1 || !adev->vcn.inst[0].using_unified_queue)
+ return;
+ sprintf(name, "amdgpu_vcn_sched_mask");
+ debugfs_create_file(name, 0600, root, adev,
+ &amdgpu_debugfs_vcn_sched_mask_fops);
+#endif
+}
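
The file created above accepts a bitmask of VCN instances that remain eligible for job submission. Assuming the usual debugfs mount point and DRI minor 0 (both depend on the system and are assumptions here), a trivial consumer of the interface might look like this sketch:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path assumes debugfs at /sys/kernel/debug and DRI minor 0. */
	const char *path = "/sys/kernel/debug/dri/0/amdgpu_vcn_sched_mask";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Keep only VCN instances 0 and 1 eligible for job submission. */
	if (write(fd, "0x3", 3) != 3)
		perror("write");
	close(fd);
	return 0;
}
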
+
+/**
+ * vcn_set_powergating_state - set VCN block powergating state
+ *
+ * @ip_block: amdgpu_ip_block pointer
+ * @state: power gating state
+ *
+ * Set VCN block powergating state
+ */
+int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int ret = 0, i;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
+ ret |= vinst->set_pg_state(vinst, state);
+ }
+
+ return ret;
+}
+
+/**
+ * amdgpu_vcn_reset_engine - Reset a specific VCN engine
+ * @adev: Pointer to the AMDGPU device
+ * @instance_id: VCN engine instance to reset
+ *
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+static int amdgpu_vcn_reset_engine(struct amdgpu_device *adev,
+ uint32_t instance_id)
+{
+ struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[instance_id];
+ int r, i;
+
+ mutex_lock(&vinst->engine_reset_mutex);
+ /* Stop the scheduler's work queue for the dec and enc rings if they are running.
+ * This ensures that no new tasks are submitted to the queues while
+ * the reset is in progress.
+ */
+ drm_sched_wqueue_stop(&vinst->ring_dec.sched);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ drm_sched_wqueue_stop(&vinst->ring_enc[i].sched);
+
+ /* Perform the VCN reset for the specified instance */
+ r = vinst->reset(vinst);
+ if (r)
+ goto unlock;
+ r = amdgpu_ring_test_ring(&vinst->ring_dec);
+ if (r)
+ goto unlock;
+ for (i = 0; i < vinst->num_enc_rings; i++) {
+ r = amdgpu_ring_test_ring(&vinst->ring_enc[i]);
+ if (r)
+ goto unlock;
+ }
+ amdgpu_fence_driver_force_completion(&vinst->ring_dec);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ amdgpu_fence_driver_force_completion(&vinst->ring_enc[i]);
+
+ /* Restart the scheduler's work queue for the dec and enc rings
+ * if they were stopped by this function. This allows new tasks
+ * to be submitted to the queues after the reset is complete.
+ */
+ drm_sched_wqueue_start(&vinst->ring_dec.sched);
+ for (i = 0; i < vinst->num_enc_rings; i++)
+ drm_sched_wqueue_start(&vinst->ring_enc[i].sched);
+
+unlock:
+ mutex_unlock(&vinst->engine_reset_mutex);
+
+ return r;
+}
+
+/**
+ * amdgpu_vcn_ring_reset - Reset a VCN ring
+ * @ring: ring to reset
+ * @vmid: vmid of guilty job
+ * @timedout_fence: fence of timed out job
+ *
+ * This helper is for VCN blocks without unified queues because
+ * resetting the engine resets all queues in that case. With
+ * unified queues we have one queue per engine.
+ * Returns: 0 on success, or a negative error code on failure.
+ */
+int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (adev->vcn.inst[ring->me].using_unified_queue)
+ return -EINVAL;
+
+ return amdgpu_vcn_reset_engine(adev, ring->me);
+}
+
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count)
+{
+ adev->vcn.ip_dump = kcalloc(adev->vcn.num_vcn_inst * count,
+ sizeof(uint32_t), GFP_KERNEL);
+ if (!adev->vcn.ip_dump)
+ return -ENOMEM;
+ adev->vcn.reg_list = reg;
+ adev->vcn.reg_count = count;
+
+ return 0;
+}
+
+static void amdgpu_vcn_reg_dump_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->vcn.ip_dump);
+ adev->vcn.ip_dump = NULL;
+ adev->vcn.reg_list = NULL;
+ adev->vcn.reg_count = 0;
+}
+
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+
+ inst_off = i * adev->vcn.reg_count;
+ /* mmUVD_POWER_STATUS is always readable and is the first in reg_list */
+ adev->vcn.ip_dump[inst_off] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[0], i));
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered)
+ for (j = 1; j < adev->vcn.reg_count; j++)
+ adev->vcn.ip_dump[inst_off + j] =
+ RREG32(SOC15_REG_ENTRY_OFFSET_INST(adev->vcn.reg_list[j], i));
+ }
+}
+
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i, j;
+ bool is_powered;
+ u32 inst_off;
+
+ if (!adev->vcn.ip_dump)
+ return;
+
+ drm_printf(p, "num_instances:%d\n", adev->vcn.num_vcn_inst);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i)) {
+ drm_printf(p, "\nHarvested Instance:VCN%d Skipping dump\n", i);
+ continue;
+ }
+
+ inst_off = i * adev->vcn.reg_count;
+ is_powered = (adev->vcn.ip_dump[inst_off] &
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF) !=
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF;
+
+ if (is_powered) {
+ drm_printf(p, "\nActive Instance:VCN%d\n", i);
+ for (j = 0; j < adev->vcn.reg_count; j++)
+ drm_printf(p, "%-50s \t 0x%08x\n", adev->vcn.reg_list[j].reg_name,
+ adev->vcn.ip_dump[inst_off + j]);
+ } else {
+ drm_printf(p, "\nInactive Instance:VCN%d\n", i);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
new file mode 100644
index 000000000000..dc8a17bcc3c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -0,0 +1,571 @@
+/*
+ * Copyright 2016-2024 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_VCN_H__
+#define __AMDGPU_VCN_H__
+
+#include "amdgpu_ras.h"
+
+#define AMDGPU_VCN_STACK_SIZE (128*1024)
+#define AMDGPU_VCN_CONTEXT_SIZE (512*1024)
+
+#define AMDGPU_VCN_FIRMWARE_OFFSET 256
+#define AMDGPU_VCN_MAX_ENC_RINGS 3
+
+#define AMDGPU_MAX_VCN_INSTANCES 4
+#define AMDGPU_MAX_VCN_ENC_RINGS (AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES)
+
+#define AMDGPU_VCN_HARVEST_VCN0 (1 << 0)
+#define AMDGPU_VCN_HARVEST_VCN1 (1 << 1)
+
+#define VCN_DEC_KMD_CMD 0x80000000
+#define VCN_DEC_CMD_FENCE 0x00000000
+#define VCN_DEC_CMD_TRAP 0x00000001
+#define VCN_DEC_CMD_WRITE_REG 0x00000004
+#define VCN_DEC_CMD_REG_READ_COND_WAIT 0x00000006
+#define VCN_DEC_CMD_PACKET_START 0x0000000a
+#define VCN_DEC_CMD_PACKET_END 0x0000000b
+
+#define VCN_DEC_SW_CMD_NO_OP 0x00000000
+#define VCN_DEC_SW_CMD_END 0x00000001
+#define VCN_DEC_SW_CMD_IB 0x00000002
+#define VCN_DEC_SW_CMD_FENCE 0x00000003
+#define VCN_DEC_SW_CMD_TRAP 0x00000004
+#define VCN_DEC_SW_CMD_IB_AUTO 0x00000005
+#define VCN_DEC_SW_CMD_SEMAPHORE 0x00000006
+#define VCN_DEC_SW_CMD_PREEMPT_FENCE 0x00000009
+#define VCN_DEC_SW_CMD_REG_WRITE 0x0000000b
+#define VCN_DEC_SW_CMD_REG_WAIT 0x0000000c
+
+#define VCN_ENC_CMD_NO_OP 0x00000000
+#define VCN_ENC_CMD_END 0x00000001
+#define VCN_ENC_CMD_IB 0x00000002
+#define VCN_ENC_CMD_FENCE 0x00000003
+#define VCN_ENC_CMD_TRAP 0x00000004
+#define VCN_ENC_CMD_REG_WRITE 0x0000000b
+#define VCN_ENC_CMD_REG_WAIT 0x0000000c
+
+#define VCN_AON_SOC_ADDRESS_2_0 0x1f800
+#define VCN_VID_IP_ADDRESS_2_0 0x0
+#define VCN_AON_IP_ADDRESS_2_0 0x30000
+
+#define mmUVD_RBC_XX_IB_REG_CHECK 0x026b
+#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1
+#define mmUVD_REG_XX_MASK 0x026c
+#define mmUVD_REG_XX_MASK_BASE_IDX 1
+
+/* 1 second timeout */
+#define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+#define RREG32_SOC15_DPG_MODE_1_0(ip, inst_idx, reg, mask, sram_sel) \
+ ({ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \
+ UVD_DPG_LMA_CTL__MASK_EN_MASK | \
+ ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
+ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
+ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
+ RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA); \
+ })
+
+#define WREG32_SOC15_DPG_MODE_1_0(ip, inst_idx, reg, value, mask, sram_sel) \
+ do { \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \
+ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \
+ UVD_DPG_LMA_CTL__READ_WRITE_MASK | \
+ ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \
+ << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \
+ (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \
+ } while (0)
+
+#define SOC15_DPG_MODE_OFFSET(ip, inst_idx, reg) \
+ ({ \
+ uint32_t internal_reg_offset, addr; \
+ bool video_range, video1_range, aon_range, aon1_range; \
+ \
+ addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
+ addr <<= 2; \
+ video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) && \
+ ((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600))))); \
+ video1_range = ((((0xFFFFF & addr) >= (VCN1_VID_SOC_ADDRESS_3_0)) && \
+ ((0xFFFFF & addr) < ((VCN1_VID_SOC_ADDRESS_3_0 + 0x2600))))); \
+ aon_range = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS_2_0)) && \
+ ((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS_2_0 + 0x600))))); \
+ aon1_range = ((((0xFFFFF & addr) >= (VCN1_AON_SOC_ADDRESS_3_0)) && \
+ ((0xFFFFF & addr) < ((VCN1_AON_SOC_ADDRESS_3_0 + 0x600))))); \
+ if (video_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS_2_0) + \
+ (VCN_VID_IP_ADDRESS_2_0)); \
+ else if (aon_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS_2_0) + \
+ (VCN_AON_IP_ADDRESS_2_0)); \
+ else if (video1_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN1_VID_SOC_ADDRESS_3_0) + \
+ (VCN_VID_IP_ADDRESS_2_0)); \
+ else if (aon1_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN1_AON_SOC_ADDRESS_3_0) + \
+ (VCN_AON_IP_ADDRESS_2_0)); \
+ else \
+ internal_reg_offset = (0xFFFFF & addr); \
+ \
+ internal_reg_offset >>= 2; \
+ })
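
The macro above converts a register's SOC byte address into the internal offset the DPG interface expects: the low 20 bits are classified against the VID/AON apertures and rebased onto the corresponding IP base. A standalone sketch of the AON case, using the two constants visible in this header (other apertures follow the same pattern; the input address is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t aon_soc_base = 0x1f800;	/* VCN_AON_SOC_ADDRESS_2_0 */
	uint32_t aon_ip_base = 0x30000;		/* VCN_AON_IP_ADDRESS_2_0 */
	uint32_t addr = 0x1f8a4;		/* made-up register byte address */
	uint32_t low = addr & 0xFFFFF;
	uint32_t internal;

	if (low >= aon_soc_base && low < aon_soc_base + 0x600)
		internal = low - aon_soc_base + aon_ip_base;	/* AON aperture */
	else
		internal = low;					/* fall through */

	printf("internal dword offset: 0x%x\n", internal >> 2);
	return 0;
}
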
+
+#define RREG32_SOC15_DPG_MODE(inst_idx, offset, mask_en) \
+ ({ \
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \
+ (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
+ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA); \
+ })
+
+#define WREG32_SOC15_DPG_MODE(inst_idx, offset, value, mask_en, indirect) \
+ do { \
+ if (!indirect) { \
+ WREG32_SOC15(VCN, GET_INST(VCN, inst_idx), \
+ mmUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15( \
+ VCN, GET_INST(VCN, inst_idx), \
+ mmUVD_DPG_LMA_CTL, \
+ (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
+ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ } else { \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \
+ offset; \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \
+ value; \
+ } \
+ } while (0)
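
In the indirect branch above the register is not touched at all; an (offset, value) pair is appended to the instance's dpg_sram staging buffer, and amdgpu_vcn_psp_update_sram() later hands the accumulated bytes to the PSP (its ucode_size is simply curr_addr minus cpu_addr). A standalone sketch of that accumulation, with made-up register offsets:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sram[64];			/* stand-in for the DPG SRAM BO */
	uint32_t *curr = sram;
	unsigned int i;

	static const struct { uint32_t offset, value; } regs[] = {
		{ 0x10, 0x1 }, { 0x24, 0xff }, { 0x30, 0xdeadbeef },
	};

	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
		*curr++ = regs[i].offset;	/* register to program */
		*curr++ = regs[i].value;	/* value the firmware writes */
	}

	printf("staged %zu bytes of register writes\n",
	       (size_t)((uintptr_t)curr - (uintptr_t)sram));
	return 0;
}
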
+
+#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg) \
+ ({ \
+ uint32_t internal_reg_offset, addr; \
+ bool video_range, video1_range, aon_range, aon1_range; \
+ \
+ addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \
+ addr <<= 2; \
+ video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS + 0x2600))))); \
+ video1_range = ((((0xFFFFF & addr) >= (VCN1_VID_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN1_VID_SOC_ADDRESS + 0x2600))))); \
+ aon_range = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS + 0x600))))); \
+ aon1_range = ((((0xFFFFF & addr) >= (VCN1_AON_SOC_ADDRESS)) && \
+ ((0xFFFFF & addr) < ((VCN1_AON_SOC_ADDRESS + 0x600))))); \
+ if (video_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS) + \
+ (VCN_VID_IP_ADDRESS)); \
+ else if (aon_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS) + \
+ (VCN_AON_IP_ADDRESS)); \
+ else if (video1_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN1_VID_SOC_ADDRESS) + \
+ (VCN_VID_IP_ADDRESS)); \
+ else if (aon1_range) \
+ internal_reg_offset = ((0xFFFFF & addr) - (VCN1_AON_SOC_ADDRESS) + \
+ (VCN_AON_IP_ADDRESS)); \
+ else \
+ internal_reg_offset = (0xFFFFF & addr); \
+ \
+ internal_reg_offset >>= 2; \
+ })
+
+#define WREG32_SOC24_DPG_MODE(inst_idx, offset, value, mask_en, indirect) \
+ do { \
+ if (!indirect) { \
+ WREG32_SOC15(VCN, GET_INST(VCN, inst_idx), \
+ regUVD_DPG_LMA_DATA, value); \
+ WREG32_SOC15( \
+ VCN, GET_INST(VCN, inst_idx), \
+ regUVD_DPG_LMA_CTL, \
+ (0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \
+ mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \
+ offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
+ } else { \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \
+ offset; \
+ *adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ = \
+ value; \
+ } \
+ } while (0)
+
+#define AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE (1 << 2)
+#define AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT (1 << 4)
+#define AMDGPU_VCN_FW_SHARED_FLAG_0_RB (1 << 6)
+#define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8)
+#define AMDGPU_VCN_SW_RING_FLAG (1 << 9)
+#define AMDGPU_VCN_FW_LOGGING_FLAG (1 << 10)
+#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG (1 << 11)
+#define AMDGPU_VCN_VF_RB_SETUP_FLAG (1 << 14)
+#define AMDGPU_VCN_VF_RB_DECOUPLE_FLAG (1 << 15)
+
+#define MAX_NUM_VCN_RB_SETUP 4
+
+#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001
+#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 0x00000001
+
+#define VCN_CODEC_DISABLE_MASK_AV1 (1 << 0)
+#define VCN_CODEC_DISABLE_MASK_VP9 (1 << 1)
+#define VCN_CODEC_DISABLE_MASK_HEVC (1 << 2)
+#define VCN_CODEC_DISABLE_MASK_H264 (1 << 3)
+
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU (0)
+#define AMDGPU_VCN_SMU_DPM_INTERFACE_APU (1)
+
+#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2
+
+struct amdgpu_hwip_reg_entry;
+
+enum amdgpu_vcn_caps {
+ AMDGPU_VCN_RRMT_ENABLED,
+};
+
+#define AMDGPU_VCN_CAPS(caps) BIT(AMDGPU_VCN_##caps)
+
+enum fw_queue_mode {
+ FW_QUEUE_RING_RESET = 1,
+ FW_QUEUE_DPG_HOLD_OFF = 2,
+};
+
+enum engine_status_constants {
+ UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
+ UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0,
+ UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0 = 0x2A2A8AA0,
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON = 0x00000002,
+ UVD_STATUS__UVD_BUSY = 0x00000004,
+ GB_ADDR_CONFIG_DEFAULT = 0x26010011,
+ UVD_STATUS__IDLE = 0x2,
+ UVD_STATUS__BUSY = 0x5,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF = 0x1,
+ UVD_STATUS__RBC_BUSY = 0x1,
+ UVD_PGFSM_STATUS_UVDJ_PWR_ON = 0,
+};
+
+enum internal_dpg_state {
+ VCN_DPG_STATE__UNPAUSE = 0,
+ VCN_DPG_STATE__PAUSE,
+};
+
+struct dpg_pause_state {
+ enum internal_dpg_state fw_based;
+ enum internal_dpg_state jpeg;
+};
+
+struct amdgpu_vcn_reg {
+ unsigned data0;
+ unsigned data1;
+ unsigned cmd;
+ unsigned nop;
+ unsigned context_id;
+ unsigned ib_vmid;
+ unsigned ib_bar_low;
+ unsigned ib_bar_high;
+ unsigned ib_size;
+ unsigned gp_scratch8;
+ unsigned scratch9;
+};
+
+struct amdgpu_vcn_fw_shared {
+ void *cpu_addr;
+ uint64_t gpu_addr;
+ uint32_t mem_size;
+ uint32_t log_offset;
+};
+
+struct amdgpu_vcn_inst {
+ struct amdgpu_device *adev;
+ int inst;
+ struct amdgpu_bo *vcpu_bo;
+ void *cpu_addr;
+ uint64_t gpu_addr;
+ void *saved_bo;
+ struct amdgpu_ring ring_dec;
+ struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
+ atomic_t sched_score;
+ struct amdgpu_irq_src irq;
+ struct amdgpu_irq_src ras_poison_irq;
+ struct amdgpu_vcn_reg external;
+ struct amdgpu_bo *dpg_sram_bo;
+ struct dpg_pause_state pause_state;
+ void *dpg_sram_cpu_addr;
+ uint64_t dpg_sram_gpu_addr;
+ uint32_t *dpg_sram_curr_addr;
+ atomic_t dpg_enc_submission_cnt;
+ struct amdgpu_vcn_fw_shared fw_shared;
+ uint8_t aid_id;
+ const struct firmware *fw; /* VCN firmware */
+ uint8_t vcn_config;
+ uint32_t vcn_codec_disable_mask;
+ atomic_t total_submission_cnt;
+ struct mutex vcn_pg_lock;
+ enum amd_powergating_state cur_state;
+ struct delayed_work idle_work;
+ unsigned fw_version;
+ unsigned num_enc_rings;
+ bool indirect_sram;
+ struct amdgpu_vcn_reg internal;
+ struct mutex vcn1_jpeg1_workaround;
+ int (*pause_dpg_mode)(struct amdgpu_vcn_inst *vinst,
+ struct dpg_pause_state *new_state);
+ int (*set_pg_state)(struct amdgpu_vcn_inst *vinst,
+ enum amd_powergating_state state);
+ int (*reset)(struct amdgpu_vcn_inst *vinst);
+ bool using_unified_queue;
+ struct mutex engine_reset_mutex;
+};
+
+struct amdgpu_vcn_ras {
+ struct amdgpu_ras_block_object ras_block;
+};
+
+struct amdgpu_vcn {
+ uint8_t num_vcn_inst;
+ struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
+
+ unsigned harvest_config;
+
+ struct ras_common_if *ras_if;
+ struct amdgpu_vcn_ras *ras;
+
+ uint16_t inst_mask;
+ uint8_t num_inst_per_aid;
+
+ /* IP reg dump */
+ uint32_t *ip_dump;
+
+ uint32_t supported_reset;
+ uint32_t caps;
+
+ bool per_inst_fw;
+ unsigned fw_version;
+
+ bool workload_profile_active;
+ struct mutex workload_profile_mutex;
+ u32 reg_count;
+ const struct amdgpu_hwip_reg_entry *reg_list;
+};
+
+struct amdgpu_fw_shared_rb_ptrs_struct {
+ /* to work around DPG R/W ptr issues. */
+ uint32_t rptr;
+ uint32_t wptr;
+};
+
+struct amdgpu_fw_shared_multi_queue {
+ uint8_t decode_queue_mode;
+ uint8_t encode_generalpurpose_queue_mode;
+ uint8_t encode_lowlatency_queue_mode;
+ uint8_t encode_realtime_queue_mode;
+ uint8_t padding[4];
+};
+
+struct amdgpu_fw_shared_sw_ring {
+ uint8_t is_enabled;
+ uint8_t padding[3];
+};
+
+struct amdgpu_fw_shared_unified_queue_struct {
+ uint8_t is_enabled;
+ uint8_t queue_mode;
+ uint8_t queue_status;
+ uint8_t padding[5];
+};
+
+struct amdgpu_fw_shared_fw_logging {
+ uint8_t is_enabled;
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint32_t size;
+};
+
+struct amdgpu_fw_shared_smu_interface_info {
+ uint8_t smu_interface_type;
+ uint8_t padding[3];
+};
+
+struct amdgpu_fw_shared {
+ uint32_t present_flag_0;
+ uint8_t pad[44];
+ struct amdgpu_fw_shared_rb_ptrs_struct rb;
+ uint8_t pad1[1];
+ struct amdgpu_fw_shared_multi_queue multi_queue;
+ struct amdgpu_fw_shared_sw_ring sw_ring;
+ struct amdgpu_fw_shared_fw_logging fw_log;
+ struct amdgpu_fw_shared_smu_interface_info smu_interface_info;
+};
+
+struct amdgpu_vcn_rb_setup_info {
+ uint32_t rb_addr_lo;
+ uint32_t rb_addr_hi;
+ uint32_t rb_size;
+};
+
+struct amdgpu_fw_shared_rb_setup {
+ uint32_t is_rb_enabled_flags;
+
+ union {
+ struct {
+ uint32_t rb_addr_lo;
+ uint32_t rb_addr_hi;
+ uint32_t rb_size;
+ uint32_t rb4_addr_lo;
+ uint32_t rb4_addr_hi;
+ uint32_t rb4_size;
+ uint32_t reserved[6];
+ };
+
+ struct {
+ struct amdgpu_vcn_rb_setup_info rb_info[MAX_NUM_VCN_RB_SETUP];
+ };
+ };
+};
+
+struct amdgpu_fw_shared_drm_key_wa {
+ uint8_t method;
+ uint8_t reserved[3];
+};
+
+struct amdgpu_fw_shared_queue_decouple {
+ uint8_t is_enabled;
+ uint8_t reserved[7];
+};
+
+struct amdgpu_vcn4_fw_shared {
+ uint32_t present_flag_0;
+ uint8_t pad[12];
+ struct amdgpu_fw_shared_unified_queue_struct sq;
+ uint8_t pad1[8];
+ struct amdgpu_fw_shared_fw_logging fw_log;
+ uint8_t pad2[20];
+ struct amdgpu_fw_shared_rb_setup rb_setup;
+ struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
+ struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
+ uint8_t pad3[9];
+ struct amdgpu_fw_shared_queue_decouple decouple;
+};
+
+struct amdgpu_vcn_fwlog {
+ uint32_t rptr;
+ uint32_t wptr;
+ uint32_t buffer_size;
+ uint32_t header_size;
+ uint8_t wrapped;
+};
+
+struct amdgpu_vcn_decode_buffer {
+ uint32_t valid_buf_flag;
+ uint32_t msg_buffer_address_hi;
+ uint32_t msg_buffer_address_lo;
+ uint32_t pad[30];
+};
+
+struct amdgpu_vcn_rb_metadata {
+ uint32_t size;
+ uint32_t present_flag_0;
+
+ uint8_t version;
+ uint8_t ring_id;
+ uint8_t pad[26];
+};
+
+struct amdgpu_vcn5_fw_shared {
+ uint32_t present_flag_0;
+ uint8_t pad[12];
+ struct amdgpu_fw_shared_unified_queue_struct sq;
+ uint8_t pad1[8];
+ struct amdgpu_fw_shared_fw_logging fw_log;
+ uint8_t pad2[20];
+ struct amdgpu_fw_shared_rb_setup rb_setup;
+ struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
+ struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
+ uint8_t pad3[404];
+};
+
+#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
+#define VCN_BLOCK_DECODE_DISABLE_MASK 0x40
+#define VCN_BLOCK_QUEUE_DISABLE_MASK 0xC0
+
+enum vcn_ring_type {
+ VCN_ENCODE_RING,
+ VCN_DECODE_RING,
+ VCN_UNIFIED_RING,
+};
+
+int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i);
+void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i);
+int amdgpu_vcn_resume(struct amdgpu_device *adev, int i);
+void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
+void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring);
+
+bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev,
+ enum vcn_ring_type type, uint32_t vcn_instance);
+
+int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring);
+int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring);
+int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+
+int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
+int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+
+enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring);
+
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i);
+
+void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn);
+void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
+ uint8_t i, struct amdgpu_vcn_inst *vcn);
+
+int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
+int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block);
+int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
+
+int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
+ enum AMDGPU_UCODE_ID ucode_id);
+int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev);
+int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev);
+void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev);
+
+int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state);
+int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *guilty_fence);
+int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
+ const struct amdgpu_hwip_reg_entry *reg, u32 count);
+void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block);
+void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
+void amdgpu_vcn_get_profile(struct amdgpu_device *adev);
+void amdgpu_vcn_put_profile(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
new file mode 100644
index 000000000000..f9d3d79f68b1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_vf_error.h"
+#include "mxgpu_ai.h"
+
+void amdgpu_vf_error_put(struct amdgpu_device *adev,
+ uint16_t sub_error_code,
+ uint16_t error_flags,
+ uint64_t error_data)
+{
+ int index;
+ uint16_t error_code;
+
+ if (!amdgpu_sriov_vf(adev))
+ return;
+
+ error_code = AMDGIM_ERROR_CODE(AMDGIM_ERROR_CATEGORY_VF, sub_error_code);
+
+ mutex_lock(&adev->virt.vf_errors.lock);
+ index = adev->virt.vf_errors.write_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
+ adev->virt.vf_errors.code[index] = error_code;
+ adev->virt.vf_errors.flags[index] = error_flags;
+ adev->virt.vf_errors.data[index] = error_data;
+ adev->virt.vf_errors.write_count++;
+ mutex_unlock(&adev->virt.vf_errors.lock);
+}
+
+void amdgpu_vf_error_trans_all(struct amdgpu_device *adev)
+{
+ /* u32 pf2vf_flags = 0; */
+ u32 data1, data2, data3;
+ int index;
+
+ if ((NULL == adev) || (!amdgpu_sriov_vf(adev)) ||
+ (!adev->virt.ops) || (!adev->virt.ops->trans_msg)) {
+ return;
+ }
+/*
+ TODO: Enable this code when pf2vf_info is merged
+ AMDGPU_FW_VRAM_PF2VF_READ (adev, feature_flags, &pf2vf_flags);
+ if (!(pf2vf_flags & AMDGIM_FEATURE_ERROR_LOG_COLLECT)) {
+ return;
+ }
+*/
+
+ mutex_lock(&adev->virt.vf_errors.lock);
+ /* The error log is a ring; if it has overflowed, advance read_count so only the newest entries are sent. */
+ if (adev->virt.vf_errors.write_count - adev->virt.vf_errors.read_count > AMDGPU_VF_ERROR_ENTRY_SIZE) {
+ adev->virt.vf_errors.read_count = adev->virt.vf_errors.write_count - AMDGPU_VF_ERROR_ENTRY_SIZE;
+ }
+
+ while (adev->virt.vf_errors.read_count < adev->virt.vf_errors.write_count) {
+ index = adev->virt.vf_errors.read_count % AMDGPU_VF_ERROR_ENTRY_SIZE;
+ data1 = AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(adev->virt.vf_errors.code[index],
+ adev->virt.vf_errors.flags[index]);
+ data2 = adev->virt.vf_errors.data[index] & 0xFFFFFFFF;
+ data3 = (adev->virt.vf_errors.data[index] >> 32) & 0xFFFFFFFF;
+
+ adev->virt.ops->trans_msg(adev, IDH_LOG_VF_ERROR, data1, data2, data3);
+ adev->virt.vf_errors.read_count++;
+ }
+ mutex_unlock(&adev->virt.vf_errors.lock);
+}
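
The two functions above share a small ring of error records: amdgpu_vf_error_put() writes at write_count modulo the buffer size, and amdgpu_vf_error_trans_all() clamps read_count so an overflow only discards the oldest entries. The same indexing in isolation, with the entry count shrunk to 4 for readability (a sketch, not driver code):

#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE	4	/* stand-in for AMDGPU_VF_ERROR_ENTRY_SIZE */

static uint16_t code[ENTRY_SIZE];
static unsigned int write_count, read_count;

static void error_put(uint16_t c)
{
	code[write_count % ENTRY_SIZE] = c;	/* overwrite the oldest slot */
	write_count++;
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 6; i++)			/* log more errors than slots */
		error_put(0x100 + i);

	/* Only the newest ENTRY_SIZE entries survive an overflow. */
	if (write_count - read_count > ENTRY_SIZE)
		read_count = write_count - ENTRY_SIZE;

	while (read_count < write_count)
		printf("error 0x%x\n", code[read_count++ % ENTRY_SIZE]);
	return 0;
}
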
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
new file mode 100644
index 000000000000..6436bd053325
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vf_error.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __VF_ERROR_H__
+#define __VF_ERROR_H__
+
+#define AMDGIM_ERROR_CODE_FLAGS_TO_MAILBOX(c,f) (((c & 0xFFFF) << 16) | (f & 0xFFFF))
+#define AMDGIM_ERROR_CODE(t,c) (((t&0xF)<<12)|(c&0xFFF))
+
+/* Please keep this enum in sync with the AMD GIM driver */
+enum AMDGIM_ERROR_VF {
+ AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL = 0,
+ AMDGIM_ERROR_VF_NO_VBIOS,
+ AMDGIM_ERROR_VF_GPU_POST_ERROR,
+ AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL,
+ AMDGIM_ERROR_VF_FENCE_INIT_FAIL,
+
+ AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL,
+ AMDGIM_ERROR_VF_IB_INIT_FAIL,
+ AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL,
+ AMDGIM_ERROR_VF_ASIC_RESUME_FAIL,
+ AMDGIM_ERROR_VF_GPU_RESET_FAIL,
+
+ AMDGIM_ERROR_VF_TEST,
+ AMDGIM_ERROR_VF_MAX
+};
+
+enum AMDGIM_ERROR_CATEGORY {
+ AMDGIM_ERROR_CATEGORY_NON_USED = 0,
+ AMDGIM_ERROR_CATEGORY_GIM,
+ AMDGIM_ERROR_CATEGORY_PF,
+ AMDGIM_ERROR_CATEGORY_VF,
+ AMDGIM_ERROR_CATEGORY_VBIOS,
+ AMDGIM_ERROR_CATEGORY_MONITOR,
+
+ AMDGIM_ERROR_CATEGORY_MAX
+};
+
+void amdgpu_vf_error_put(struct amdgpu_device *adev,
+ uint16_t sub_error_code,
+ uint16_t error_flags,
+ uint64_t error_data);
+void amdgpu_vf_error_trans_all(struct amdgpu_device *adev);
+
+#endif /* __VF_ERROR_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 6bf5cea294f2..3328ab63376b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -21,147 +21,61 @@
*
*/
-#include "amdgpu.h"
+#include <linux/module.h>
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev)
-{
- int r;
- void *ptr;
+#ifdef CONFIG_X86
+#include <asm/hypervisor.h>
+#endif
- r = amdgpu_bo_create_kernel(adev, AMDGPU_CSA_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->virt.csa_obj,
- &adev->virt.csa_vmid0_addr, &ptr);
- if (r)
- return r;
+#include <drm/drm_drv.h>
+#include <xen/xen.h>
- memset(ptr, 0, AMDGPU_CSA_SIZE);
- return 0;
-}
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_dpm.h"
+#include "vi.h"
+#include "soc15.h"
+#include "nv.h"
-/*
- * amdgpu_map_static_csa should be called during amdgpu_vm_init
- * it maps virtual address "AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE"
- * to this VM, and each command submission of GFX should use this virtual
- * address within META_DATA init package to support SRIOV gfx preemption.
- */
+#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
+ do { \
+ vf2pf_info->ucode_info[ucode].id = ucode; \
+ vf2pf_info->ucode_info[ucode].version = ver; \
+ } while (0)
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
- int r;
- struct amdgpu_bo_va *bo_va;
- struct ww_acquire_ctx ticket;
- struct list_head list;
- struct amdgpu_bo_list_entry pd;
- struct ttm_validate_buffer csa_tv;
-
- INIT_LIST_HEAD(&list);
- INIT_LIST_HEAD(&csa_tv.head);
- csa_tv.bo = &adev->virt.csa_obj->tbo;
- csa_tv.shared = true;
-
- list_add(&csa_tv.head, &list);
- amdgpu_vm_get_pd_bo(vm, &list, &pd);
-
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
- if (r) {
- DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
- return r;
- }
-
- bo_va = amdgpu_vm_bo_add(adev, vm, adev->virt.csa_obj);
- if (!bo_va) {
- ttm_eu_backoff_reservation(&ticket, &list);
- DRM_ERROR("failed to create bo_va for static CSA\n");
- return -ENOMEM;
- }
-
- r = amdgpu_vm_alloc_pts(adev, bo_va->vm, AMDGPU_CSA_VADDR,
- AMDGPU_CSA_SIZE);
- if (r) {
- DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, bo_va);
- ttm_eu_backoff_reservation(&ticket, &list);
- return r;
- }
-
- r = amdgpu_vm_bo_map(adev, bo_va, AMDGPU_CSA_VADDR, 0,AMDGPU_CSA_SIZE,
- AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
- AMDGPU_PTE_EXECUTABLE);
-
- if (r) {
- DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, bo_va);
- ttm_eu_backoff_reservation(&ticket, &list);
- return r;
- }
-
- vm->csa_bo_va = bo_va;
- ttm_eu_backoff_reservation(&ticket, &list);
- return 0;
+ /*
+ * By now all MMIO pages except the mailbox are blocked if blocking
+ * is enabled in the hypervisor. Use SCRATCH_REG0 to test.
+ */
+ return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
- /* enable virtual display */
- adev->mode_info.num_crtc = 1;
- adev->enable_virtual_display = true;
-
- mutex_init(&adev->virt.lock_kiq);
- mutex_init(&adev->virt.lock_reset);
-}
-
-uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
-{
- signed long r;
- uint32_t val;
- struct dma_fence *f;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *ring = &kiq->ring;
-
- BUG_ON(!ring->funcs->emit_rreg);
-
- mutex_lock(&adev->virt.lock_kiq);
- amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_rreg(ring, reg);
- amdgpu_fence_emit(ring, &f);
- amdgpu_ring_commit(ring);
- mutex_unlock(&adev->virt.lock_kiq);
-
- r = dma_fence_wait(f, false);
- if (r)
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
- dma_fence_put(f);
-
- val = adev->wb.wb[adev->virt.reg_val_offs];
-
- return val;
-}
-
-void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
-{
- signed long r;
- struct dma_fence *f;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
- struct amdgpu_ring *ring = &kiq->ring;
+ struct drm_device *ddev = adev_to_drm(adev);
- BUG_ON(!ring->funcs->emit_wreg);
-
- mutex_lock(&adev->virt.lock_kiq);
- amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_wreg(ring, reg, v);
- amdgpu_fence_emit(ring, &f);
- amdgpu_ring_commit(ring);
- mutex_unlock(&adev->virt.lock_kiq);
+ /* enable virtual display */
+ if (adev->asic_type != CHIP_ALDEBARAN &&
+ adev->asic_type != CHIP_ARCTURUS &&
+ ((adev->pdev->class >> 8) != PCI_CLASS_ACCELERATOR_PROCESSING)) {
+ if (adev->mode_info.num_crtc == 0)
+ adev->mode_info.num_crtc = 1;
+ adev->enable_virtual_display = true;
+ }
+ ddev->driver_features &= ~DRIVER_ATOMIC;
+ adev->cg_flags = 0;
+ adev->pg_flags = 0;
- r = dma_fence_wait(f, false);
- if (r)
- DRM_ERROR("wait for kiq fence error: %ld.\n", r);
- dma_fence_put(f);
+ /* Reduce kcq number to 2 to reduce latency */
+ if (amdgpu_num_kcq == -1)
+ amdgpu_num_kcq = 2;
}
/**
* amdgpu_virt_request_full_gpu() - request full gpu access
- * @amdgpu: amdgpu device.
+ * @adev: amdgpu device.
* @init: is driver init time.
* When start to init/fini driver, first need to request full gpu access.
* Return: Zero if request success, otherwise will return error.
@@ -173,8 +87,10 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
if (virt->ops && virt->ops->req_full_gpu) {
r = virt->ops->req_full_gpu(adev, init);
- if (r)
+ if (r) {
+ adev->no_hw_access = true;
return r;
+ }
adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
}
@@ -184,7 +100,7 @@ int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
/**
* amdgpu_virt_release_full_gpu() - release full gpu access
- * @amdgpu: amdgpu device.
+ * @adev: amdgpu device.
* @init: is driver init time.
 * When finishing driver init/fini, we need to release full GPU access.
 * Return: Zero if release succeeds, otherwise will return error.
@@ -206,7 +122,7 @@ int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
/**
* amdgpu_virt_reset_gpu() - reset gpu
- * @amdgpu: amdgpu device.
+ * @adev: amdgpu device.
* Send reset command to GPU hypervisor to reset GPU that VM is using
* Return: Zero if reset success, otherwise will return error.
*/
@@ -226,9 +142,52 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
return 0;
}
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (virt->ops && virt->ops->req_init_data)
+ virt->ops->req_init_data(adev);
+
+ if (adev->virt.req_init_data_ver > 0)
+ DRM_INFO("host supports REQ_INIT_DATA handshake\n");
+ else
+ DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
+}
+
+/**
+ * amdgpu_virt_ready_to_reset() - send ready to reset to host
+ * @adev: amdgpu device.
+ * Send ready to reset message to GPU hypervisor to signal we have stopped GPU
+ * activity and are ready for host FLR
+ */
+void amdgpu_virt_ready_to_reset(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (virt->ops && virt->ops->ready_to_reset)
+ virt->ops->ready_to_reset(adev);
+}
+
+/**
+ * amdgpu_virt_wait_reset() - wait for reset gpu completed
+ * @adev: amdgpu device.
+ * Wait for GPU reset completed.
+ * Return: Zero if reset success, otherwise will return error.
+ */
+int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (!virt->ops || !virt->ops->wait_reset)
+ return -EINVAL;
+
+ return virt->ops->wait_reset(adev);
+}
+
/**
* amdgpu_virt_alloc_mm_table() - alloc memory for mm table
- * @amdgpu: amdgpu device.
+ * @adev: amdgpu device.
* MM table is used by UVD and VCE for its initialization
* Return: Zero if allocate success.
*/
@@ -240,7 +199,8 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
return 0;
r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->virt.mm_table.bo,
&adev->virt.mm_table.gpu_addr,
(void *)&adev->virt.mm_table.cpu_addr);
@@ -258,7 +218,7 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
/**
* amdgpu_virt_free_mm_table() - free mm table memory
- * @amdgpu: amdgpu device.
+ * @adev: amdgpu device.
* Free MM table memory
*/
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
@@ -271,3 +231,1328 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
(void *)&adev->virt.mm_table.cpu_addr);
adev->virt.mm_table.gpu_addr = 0;
}
+
+/**
+ * amdgpu_virt_rcvd_ras_interrupt() - receive ras interrupt
+ * @adev: amdgpu device.
+ * Check whether host sent RAS error message
+ * Return: true if found, otherwise false
+ */
+bool amdgpu_virt_rcvd_ras_interrupt(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (!virt->ops || !virt->ops->rcvd_ras_intr)
+ return false;
+
+ return virt->ops->rcvd_ras_intr(adev);
+}
+
+
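+/*
+ * Simple byte-wise checksum used for PF<->VF messages: sum every byte of
+ * @obj on top of @key, then subtract the bytes of the stored @checksum so
+ * the checksum field itself does not influence the result.
+ */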
+unsigned int amd_sriov_msg_checksum(void *obj,
+ unsigned long obj_size,
+ unsigned int key,
+ unsigned int checksum)
+{
+ unsigned int ret = key;
+ unsigned long i = 0;
+ unsigned char *pos;
+
+ pos = (char *)obj;
+ /* calculate checksum */
+ for (i = 0; i < obj_size; ++i)
+ ret += *(pos + i);
+ /* minus the checksum itself */
+ pos = (char *)&checksum;
+ for (i = 0; i < sizeof(checksum); ++i)
+ ret -= *(pos + i);
+ return ret;
+}
+
+static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
+ /* GPU will be marked bad on host if bp count is more than 10,
+ * so alloc 512 is enough.
+ */
+ unsigned int align_space = 512;
+ void *bps = NULL;
+ struct amdgpu_bo **bps_bo = NULL;
+
+ *data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
+ if (!*data)
+ goto data_failure;
+
+ bps = kmalloc_array(align_space, sizeof(*(*data)->bps), GFP_KERNEL);
+ if (!bps)
+ goto bps_failure;
+
+ bps_bo = kmalloc_array(align_space, sizeof(*(*data)->bps_bo), GFP_KERNEL);
+ if (!bps_bo)
+ goto bps_bo_failure;
+
+ (*data)->bps = bps;
+ (*data)->bps_bo = bps_bo;
+ (*data)->count = 0;
+ (*data)->last_reserved = 0;
+
+ virt->ras_init_done = true;
+
+ return 0;
+
+bps_bo_failure:
+ kfree(bps);
+bps_failure:
+ kfree(*data);
+data_failure:
+ return -ENOMEM;
+}
+
+static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
+ struct amdgpu_bo *bo;
+ int i;
+
+ if (!data)
+ return;
+
+ for (i = data->last_reserved - 1; i >= 0; i--) {
+ bo = data->bps_bo[i];
+ if (bo) {
+ amdgpu_bo_free_kernel(&bo, NULL, NULL);
+ data->bps_bo[i] = bo;
+ }
+ data->last_reserved = i;
+ }
+}
+
+void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
+
+ virt->ras_init_done = false;
+
+ if (!data)
+ return;
+
+ amdgpu_virt_ras_release_bp(adev);
+
+ kfree(data->bps);
+ kfree(data->bps_bo);
+ kfree(data);
+ virt->virt_eh_data = NULL;
+}
+
+static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
+ struct eeprom_table_record *bps, int pages)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
+
+ if (!data)
+ return;
+
+ memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
+ data->count += pages;
+}
+
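+/*
+ * Reserve the VRAM pages recorded as bad so the allocator never hands them
+ * out again; reservation resumes from last_reserved on each call.
+ */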
+static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
+ struct amdgpu_bo *bo = NULL;
+ uint64_t bp;
+ int i;
+
+ if (!data)
+ return;
+
+ for (i = data->last_reserved; i < data->count; i++) {
+ bp = data->bps[i].retired_page;
+
+ /* There are two cases of reserve error that should be ignored:
+ * 1) a ras bad page has been allocated (used by someone);
+ * 2) a ras bad page has been reserved (duplicate error injection
+ * for one page);
+ */
+ if (ttm_resource_manager_used(man)) {
+ amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
+ bp << AMDGPU_GPU_PAGE_SHIFT,
+ AMDGPU_GPU_PAGE_SIZE);
+ data->bps_bo[i] = NULL;
+ } else {
+ if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
+ AMDGPU_GPU_PAGE_SIZE,
+ &bo, NULL))
+ DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+ data->bps_bo[i] = bo;
+ }
+ data->last_reserved = i + 1;
+ bo = NULL;
+ }
+}
+
+static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
+ uint64_t retired_page)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
+ int i;
+
+ if (!data)
+ return true;
+
+ for (i = 0; i < data->count; i++)
+ if (retired_page == data->bps[i].retired_page)
+ return true;
+
+ return false;
+}
+
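+/*
+ * Walk the bad-page block the host placed in reserved VRAM and record and
+ * reserve every retired page that is not already known.
+ */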
+static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
+ uint64_t bp_block_offset, uint32_t bp_block_size)
+{
+ struct eeprom_table_record bp;
+ uint64_t retired_page;
+ uint32_t bp_idx, bp_cnt;
+ void *vram_usage_va = NULL;
+
+ if (adev->mman.fw_vram_usage_va)
+ vram_usage_va = adev->mman.fw_vram_usage_va;
+ else
+ vram_usage_va = adev->mman.drv_vram_usage_va;
+
+ memset(&bp, 0, sizeof(bp));
+
+ if (bp_block_size) {
+ bp_cnt = bp_block_size / sizeof(uint64_t);
+ for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
+ retired_page = *(uint64_t *)(vram_usage_va +
+ bp_block_offset + bp_idx * sizeof(uint64_t));
+ bp.retired_page = retired_page;
+
+ if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
+ continue;
+
+ amdgpu_virt_ras_add_bps(adev, &bp, 1);
+
+ amdgpu_virt_ras_reserve_bps(adev);
+ }
+ }
+}
+
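+/*
+ * Parse and validate the PF2VF message the host left in reserved VRAM:
+ * verify size and checksum for the given message version, then cache the
+ * feature flags, register-access mode, multimedia bandwidth limits and RAS
+ * capabilities the guest needs at runtime.
+ */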
+static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
+{
+ struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
+ uint32_t checksum;
+ uint32_t checkval;
+
+ uint32_t i;
+ uint32_t tmp;
+
+ if (adev->virt.fw_reserve.p_pf2vf == NULL)
+ return -EINVAL;
+
+ if (pf2vf_info->size > 1024) {
+ dev_err(adev->dev, "invalid pf2vf message size: 0x%x\n", pf2vf_info->size);
+ return -EINVAL;
+ }
+
+ switch (pf2vf_info->version) {
+ case 1:
+ checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
+ checkval = amd_sriov_msg_checksum(
+ adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
+ adev->virt.fw_reserve.checksum_key, checksum);
+ if (checksum != checkval) {
+ dev_err(adev->dev,
+ "invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n",
+ checksum, checkval);
+ return -EINVAL;
+ }
+
+ adev->virt.gim_feature =
+ ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
+ break;
+ case 2:
+ /* TODO: missing key, need to add it later */
+ checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
+ checkval = amd_sriov_msg_checksum(
+ adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
+ 0, checksum);
+ if (checksum != checkval) {
+ dev_err(adev->dev,
+ "invalid pf2vf message: header checksum=0x%x calculated checksum=0x%x\n",
+ checksum, checkval);
+ return -EINVAL;
+ }
+
+ adev->virt.vf2pf_update_interval_ms =
+ ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
+ adev->virt.gim_feature =
+ ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
+ adev->virt.reg_access =
+ ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->reg_access_flags.all;
+
+ adev->virt.decode_max_dimension_pixels = 0;
+ adev->virt.decode_max_frame_pixels = 0;
+ adev->virt.encode_max_dimension_pixels = 0;
+ adev->virt.encode_max_frame_pixels = 0;
+ adev->virt.is_mm_bw_enabled = false;
+ for (i = 0; i < AMD_SRIOV_MSG_RESERVE_VCN_INST; i++) {
+ tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_dimension_pixels;
+ adev->virt.decode_max_dimension_pixels = max(tmp, adev->virt.decode_max_dimension_pixels);
+
+ tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].decode_max_frame_pixels;
+ adev->virt.decode_max_frame_pixels = max(tmp, adev->virt.decode_max_frame_pixels);
+
+ tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_dimension_pixels;
+ adev->virt.encode_max_dimension_pixels = max(tmp, adev->virt.encode_max_dimension_pixels);
+
+ tmp = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->mm_bw_management[i].encode_max_frame_pixels;
+ adev->virt.encode_max_frame_pixels = max(tmp, adev->virt.encode_max_frame_pixels);
+ }
+ if ((adev->virt.decode_max_dimension_pixels > 0) || (adev->virt.encode_max_dimension_pixels > 0))
+ adev->virt.is_mm_bw_enabled = true;
+
+ adev->unique_id =
+ ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->uuid;
+ adev->virt.ras_en_caps.all = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_en_caps.all;
+ adev->virt.ras_telemetry_en_caps.all =
+ ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->ras_telemetry_en_caps.all;
+ break;
+ default:
+ dev_err(adev->dev, "invalid pf2vf version: 0x%x\n", pf2vf_info->version);
+ return -EINVAL;
+ }
+
+ /* correct too large or too small interval values */
+ if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
+ adev->virt.vf2pf_update_interval_ms = 2000;
+
+ return 0;
+}
+
+static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
+{
+ struct amd_sriov_msg_vf2pf_info *vf2pf_info;
+ vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
+
+ if (adev->virt.fw_reserve.p_vf2pf == NULL)
+ return;
+
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE, adev->vce.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD, adev->uvd.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC, adev->gmc.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME, adev->gfx.me_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP, adev->gfx.pfp_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE, adev->gfx.ce_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC, adev->gfx.rlc_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
+ adev->psp.asd_context.bin_desc.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
+ adev->psp.ras_context.context.bin_desc.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
+ adev->psp.xgmi_context.context.bin_desc.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN, adev->vcn.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU, adev->dm.dmcu_fw_version);
+}
+
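+/*
+ * Publish the guest's VF2PF message: driver version, framebuffer usage and
+ * firmware versions, stamped with the additive checksum above.
+ */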
+static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
+{
+ struct amd_sriov_msg_vf2pf_info *vf2pf_info;
+
+ vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
+
+ if (adev->virt.fw_reserve.p_vf2pf == NULL)
+ return -EINVAL;
+
+ memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));
+
+ vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
+ vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;
+
+#ifdef MODULE
+ if (THIS_MODULE->version != NULL)
+ strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
+ else
+#endif
+ strcpy(vf2pf_info->driver_version, "N/A");
+
+ vf2pf_info->pf2vf_version_required = 0; // no requirement, guest understands all
+ vf2pf_info->driver_cert = 0;
+ vf2pf_info->os_info.all = 0;
+
+ vf2pf_info->fb_usage =
+ ttm_resource_manager_usage(&adev->mman.vram_mgr.manager) >> 20;
+ vf2pf_info->fb_vis_usage =
+ amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
+ vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
+ vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
+
+ amdgpu_virt_populate_vf2pf_ucode_info(adev);
+
+ /* TODO: read dynamic info */
+ vf2pf_info->gfx_usage = 0;
+ vf2pf_info->compute_usage = 0;
+ vf2pf_info->encode_usage = 0;
+ vf2pf_info->decode_usage = 0;
+
+ vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
+ if (amdgpu_sriov_is_mes_info_enable(adev)) {
+ vf2pf_info->mes_info_addr =
+ (uint64_t)(adev->mes.resource_1_gpu_addr[0] + AMDGPU_GPU_PAGE_SIZE);
+ vf2pf_info->mes_info_size =
+ adev->mes.resource_1[0]->tbo.base.size - AMDGPU_GPU_PAGE_SIZE;
+ }
+ vf2pf_info->checksum =
+ amd_sriov_msg_checksum(
+ vf2pf_info, sizeof(*vf2pf_info), 0, 0);
+
+ return 0;
+}
+
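+/*
+ * Periodic worker for the PF<->VF data exchange: read the latest PF2VF data
+ * and publish fresh VF2PF data. Repeated read failures, or a pending host
+ * RAS interrupt, trigger a reset through the reset domain.
+ */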
+static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
+{
+ struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
+ int ret;
+
+ ret = amdgpu_virt_read_pf2vf_data(adev);
+ if (ret) {
+ adev->virt.vf2pf_update_retry_cnt++;
+
+ if ((amdgpu_virt_rcvd_ras_interrupt(adev) ||
+ adev->virt.vf2pf_update_retry_cnt >= AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT) &&
+ amdgpu_sriov_runtime(adev)) {
+
+ amdgpu_ras_set_fed(adev, true);
+ if (amdgpu_reset_domain_schedule(adev->reset_domain,
+ &adev->kfd.reset_work))
+ return;
+ else
+ dev_err(adev->dev, "Failed to queue work! at %s", __func__);
+ }
+
+ goto out;
+ }
+
+ adev->virt.vf2pf_update_retry_cnt = 0;
+ amdgpu_virt_write_vf2pf_data(adev);
+
+out:
+ schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms);
+}
+
+void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
+{
+ if (adev->virt.vf2pf_update_interval_ms != 0) {
+ DRM_INFO("clean up the vf2pf work item\n");
+ cancel_delayed_work_sync(&adev->virt.vf2pf_work);
+ adev->virt.vf2pf_update_interval_ms = 0;
+ }
+}
+
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+{
+ adev->virt.fw_reserve.p_pf2vf = NULL;
+ adev->virt.fw_reserve.p_vf2pf = NULL;
+ adev->virt.vf2pf_update_interval_ms = 0;
+ adev->virt.vf2pf_update_retry_cnt = 0;
+
+ if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) {
+ DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!");
+ } else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
+ /* go through this logic in ip_init and reset to init the workqueue */
+ amdgpu_virt_exchange_data(adev);
+
+ INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
+ schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
+ } else if (adev->bios != NULL) {
+ /* go through this logic in the early init stage to get necessary flags, e.g. rlcg_acc related */
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+
+ amdgpu_virt_read_pf2vf_data(adev);
+ }
+}
+
+
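+/*
+ * Locate the PF2VF, VF2PF and RAS telemetry regions inside the firmware- or
+ * driver-reserved VRAM, perform an initial data exchange, and pick up the
+ * host-provided bad-page block when the v2 message format is in use.
+ */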
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
+{
+ uint64_t bp_block_offset = 0;
+ uint32_t bp_block_size = 0;
+ struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
+
+ if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) {
+ if (adev->mman.fw_vram_usage_va) {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
+ } else if (adev->mman.drv_vram_usage_va) {
+ adev->virt.fw_reserve.p_pf2vf =
+ (struct amd_sriov_msg_pf2vf_info_header *)
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+ adev->virt.fw_reserve.p_vf2pf =
+ (struct amd_sriov_msg_vf2pf_info_header *)
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+ adev->virt.fw_reserve.ras_telemetry =
+ (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB << 10));
+ }
+
+ amdgpu_virt_read_pf2vf_data(adev);
+ amdgpu_virt_write_vf2pf_data(adev);
+
+ /* bad page handling for version 2 */
+ if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
+ pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;
+
+ bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
+ ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
+ bp_block_size = pf2vf_v2->bp_block_size;
+
+ if (bp_block_size && !adev->virt.ras_init_done)
+ amdgpu_virt_init_ras_err_handler_data(adev);
+
+ if (adev->virt.ras_init_done)
+ amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
+ }
+ }
+}
+
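+/*
+ * Read the per-ASIC IOV function identifier register to detect whether the
+ * device is an SR-IOV VF, has SR-IOV enabled, or (when the register reads
+ * zero inside a VM) is fully passed through.
+ */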
+static u32 amdgpu_virt_init_detect_asic(struct amdgpu_device *adev)
+{
+ uint32_t reg;
+
+ switch (adev->asic_type) {
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+ break;
+ case CHIP_VEGA10:
+ case CHIP_VEGA20:
+ case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
+ case CHIP_IP_DISCOVERY:
+ reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
+ break;
+ default: /* other chip doesn't support SRIOV */
+ reg = 0;
+ break;
+ }
+
+ if (reg & 1)
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+
+ if (reg & 0x80000000)
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+
+ if (!reg) {
+ /* passthrough mode excludes sriov mode */
+ if (is_virtual_machine() && !xen_initial_domain())
+ adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
+ }
+
+ return reg;
+}
+
+static bool amdgpu_virt_init_req_data(struct amdgpu_device *adev, u32 reg)
+{
+ bool is_sriov = false;
+
+ /* we have the ability to check now */
+ if (amdgpu_sriov_vf(adev)) {
+ is_sriov = true;
+
+ switch (adev->asic_type) {
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ vi_set_virt_ops(adev);
+ break;
+ case CHIP_VEGA10:
+ soc15_set_virt_ops(adev);
+#ifdef CONFIG_X86
+ /* do not send GPU_INIT_DATA with MS_HYPERV */
+ if (!hypervisor_is_type(X86_HYPER_MS_HYPERV))
+#endif
+ /* send a dummy GPU_INIT_DATA request to host on vega10 */
+ amdgpu_virt_request_init_data(adev);
+ break;
+ case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
+ soc15_set_virt_ops(adev);
+ break;
+ case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_IP_DISCOVERY:
+ nv_set_virt_ops(adev);
+ /* try send GPU_INIT_DATA request to host */
+ amdgpu_virt_request_init_data(adev);
+ break;
+ default: /* other chip doesn't support SRIOV */
+ is_sriov = false;
+ DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
+ break;
+ }
+ }
+
+ return is_sriov;
+}
+
+static void amdgpu_virt_init_ras(struct amdgpu_device *adev)
+{
+ ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1);
+ ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1);
+ ratelimit_state_init(&adev->virt.ras.ras_chk_criti_rs, 5 * HZ, 1);
+
+ ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+ ratelimit_set_flags(&adev->virt.ras.ras_chk_criti_rs,
+ RATELIMIT_MSG_ON_RELEASE);
+
+ mutex_init(&adev->virt.ras.ras_telemetry_mutex);
+
+ adev->virt.ras.cper_rptr = 0;
+}
+
+void amdgpu_virt_init(struct amdgpu_device *adev)
+{
+ bool is_sriov = false;
+ uint32_t reg = amdgpu_virt_init_detect_asic(adev);
+
+ is_sriov = amdgpu_virt_init_req_data(adev, reg);
+
+ if (is_sriov)
+ amdgpu_virt_init_ras(adev);
+}
+
+static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
+{
+ return amdgpu_sriov_is_debug(adev) ? true : false;
+}
+
+static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
+{
+ return amdgpu_sriov_is_normal(adev) ? true : false;
+}
+
+int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
+{
+ if (!amdgpu_sriov_vf(adev) ||
+ amdgpu_virt_access_debugfs_is_kiq(adev))
+ return 0;
+
+ if (amdgpu_virt_access_debugfs_is_mmio(adev))
+ adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+ else
+ return -EPERM;
+
+ return 0;
+}
+
+void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
+}
+
+enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
+{
+ enum amdgpu_sriov_vf_mode mode;
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ mode = SRIOV_VF_MODE_ONE_VF;
+ else
+ mode = SRIOV_VF_MODE_MULTI_VF;
+ } else {
+ mode = SRIOV_VF_MODE_BARE_METAL;
+ }
+
+ return mode;
+}
+
+void amdgpu_virt_pre_reset(struct amdgpu_device *adev)
+{
+ /* stop the data exchange thread */
+ amdgpu_virt_fini_data_exchange(adev);
+ amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_FLR);
+}
+
+void amdgpu_virt_post_reset(struct amdgpu_device *adev)
+{
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(11, 0, 3)) {
+ /* force set to GFXOFF state after reset,
+ * to avoid some invalid operation before GC enable
+ */
+ adev->gfx.is_poweron = false;
+ }
+
+ adev->mes.ring[0].sched.ready = false;
+}
+
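+/*
+ * Return true when the VF should skip loading @ucode_id: newer MP0 versions
+ * use allow-lists of firmwares the VF still loads itself, older ones use
+ * deny-lists of firmwares owned by the host.
+ */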
+bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev, uint32_t ucode_id)
+{
+ switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
+ case IP_VERSION(13, 0, 0):
+ /* no vf autoload, white list */
+ if (ucode_id == AMDGPU_UCODE_ID_VCN1 ||
+ ucode_id == AMDGPU_UCODE_ID_VCN)
+ return false;
+ else
+ return true;
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ /* black list for CHIP_NAVI12 and CHIP_SIENNA_CICHLID */
+ if (ucode_id == AMDGPU_UCODE_ID_RLC_G
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+ || ucode_id == AMDGPU_UCODE_ID_SMC)
+ return true;
+ else
+ return false;
+ case IP_VERSION(13, 0, 10):
+ /* white list */
+ if (ucode_id == AMDGPU_UCODE_ID_CAP
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES1
+ || ucode_id == AMDGPU_UCODE_ID_CP_MES1_DATA
+ || ucode_id == AMDGPU_UCODE_ID_VCN1
+ || ucode_id == AMDGPU_UCODE_ID_VCN)
+ return false;
+ else
+ return true;
+ default:
+ /* legacy black list */
+ if (ucode_id == AMDGPU_UCODE_ID_SDMA0
+ || ucode_id == AMDGPU_UCODE_ID_SDMA1
+ || ucode_id == AMDGPU_UCODE_ID_SDMA2
+ || ucode_id == AMDGPU_UCODE_ID_SDMA3
+ || ucode_id == AMDGPU_UCODE_ID_SDMA4
+ || ucode_id == AMDGPU_UCODE_ID_SDMA5
+ || ucode_id == AMDGPU_UCODE_ID_SDMA6
+ || ucode_id == AMDGPU_UCODE_ID_SDMA7
+ || ucode_id == AMDGPU_UCODE_ID_RLC_G
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+ || ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+ || ucode_id == AMDGPU_UCODE_ID_SMC)
+ return true;
+ else
+ return false;
+ }
+}
+
+void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
+ struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
+ struct amdgpu_video_codec_info *decode, uint32_t decode_array_size)
+{
+ uint32_t i;
+
+ if (!adev->virt.is_mm_bw_enabled)
+ return;
+
+ if (encode) {
+ for (i = 0; i < encode_array_size; i++) {
+ encode[i].max_width = adev->virt.encode_max_dimension_pixels;
+ encode[i].max_pixels_per_frame = adev->virt.encode_max_frame_pixels;
+ if (encode[i].max_width > 0)
+ encode[i].max_height = encode[i].max_pixels_per_frame / encode[i].max_width;
+ else
+ encode[i].max_height = 0;
+ }
+ }
+
+ if (decode) {
+ for (i = 0; i < decode_array_size; i++) {
+ decode[i].max_width = adev->virt.decode_max_dimension_pixels;
+ decode[i].max_pixels_per_frame = adev->virt.decode_max_frame_pixels;
+ if (decode[i].max_width > 0)
+ decode[i].max_height = decode[i].max_pixels_per_frame / decode[i].max_width;
+ else
+ decode[i].max_height = 0;
+ }
+ }
+}
+
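+/*
+ * Decide whether a register access for the given HW IP must be routed
+ * through the RLC indirect path under SR-IOV and, if so, which RLCG command
+ * flag to use.
+ */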
+bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
+ u32 acc_flags, u32 hwip,
+ bool write, u32 *rlcg_flag)
+{
+ bool ret = false;
+
+ switch (hwip) {
+ case GC_HWIP:
+ if (amdgpu_sriov_reg_indirect_gc(adev)) {
+ *rlcg_flag =
+ write ? AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ;
+ ret = true;
+ /* only in new version, AMDGPU_REGS_NO_KIQ and
+ * AMDGPU_REGS_RLC are enabled simultaneously */
+ } else if ((acc_flags & AMDGPU_REGS_RLC) &&
+ !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) {
+ *rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY;
+ ret = true;
+ }
+ break;
+ case MMHUB_HWIP:
+ if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
+ (acc_flags & AMDGPU_REGS_RLC) && write) {
+ *rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE;
+ ret = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
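+/*
+ * Indirect register access through the RLC for SR-IOV VFs: the value and the
+ * address/command word are written to GC scratch registers, the RLC is kicked
+ * via the spare interrupt, and SCRATCH_REG1 is polled until the request
+ * completes or reports an error. The result is returned from SCRATCH_REG0.
+ */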
+u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
+{
+ struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
+ uint32_t timeout = 50000;
+ uint32_t i, tmp;
+ uint32_t ret = 0;
+ void *scratch_reg0;
+ void *scratch_reg1;
+ void *scratch_reg2;
+ void *scratch_reg3;
+ void *spare_int;
+ unsigned long flags;
+
+ if (!adev->gfx.rlc.rlcg_reg_access_supported) {
+ dev_err(adev->dev,
+ "indirect registers access through rlcg is not available\n");
+ return 0;
+ }
+
+ if (adev->gfx.xcc_mask && (((1 << xcc_id) & adev->gfx.xcc_mask) == 0)) {
+ dev_err(adev->dev, "invalid xcc\n");
+ return 0;
+ }
+
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
+ reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl[xcc_id];
+ scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0;
+ scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
+ scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
+ scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
+
+ spin_lock_irqsave(&adev->virt.rlcg_reg_lock, flags);
+
+ if (reg_access_ctrl->spare_int)
+ spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
+
+ if (offset == reg_access_ctrl->grbm_cntl) {
+ /* if the target reg offset is grbm_cntl, write to scratch_reg2 */
+ writel(v, scratch_reg2);
+ if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ } else if (offset == reg_access_ctrl->grbm_idx) {
+ /* if the target reg offset is grbm_idx, write to scratch_reg3 */
+ writel(v, scratch_reg3);
+ if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ } else {
+ /*
+ * SCRATCH_REG0 = read/write value
+ * SCRATCH_REG1[30:28] = command
+ * SCRATCH_REG1[19:0] = address in dword
+ * SCRATCH_REG1[27:24] = Error reporting
+ */
+ writel(v, scratch_reg0);
+ writel((offset | flag), scratch_reg1);
+ if (reg_access_ctrl->spare_int)
+ writel(1, spare_int);
+
+ for (i = 0; i < timeout; i++) {
+ tmp = readl(scratch_reg1);
+ if (!(tmp & AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK))
+ break;
+ udelay(10);
+ }
+
+ tmp = readl(scratch_reg1);
+ if (i >= timeout || (tmp & AMDGPU_RLCG_SCRATCH1_ERROR_MASK) != 0) {
+ if (amdgpu_sriov_rlcg_error_report_enabled(adev)) {
+ if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) {
+ dev_err(adev->dev,
+ "vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset);
+ } else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) {
+ dev_err(adev->dev,
+ "wrong operation type, rlcg failed to program reg: 0x%05x\n", offset);
+ } else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) {
+ dev_err(adev->dev,
+ "register is not in range, rlcg failed to program reg: 0x%05x\n", offset);
+ } else {
+ dev_err(adev->dev,
+ "unknown error type, rlcg failed to program reg: 0x%05x\n", offset);
+ }
+ } else {
+ dev_err(adev->dev,
+ "timeout: rlcg failed to program reg: 0x%05x\n", offset);
+ }
+ }
+ }
+
+ ret = readl(scratch_reg0);
+
+ spin_unlock_irqrestore(&adev->virt.rlcg_reg_lock, flags);
+
+ return ret;
+}
+
+void amdgpu_sriov_wreg(struct amdgpu_device *adev,
+ u32 offset, u32 value,
+ u32 acc_flags, u32 hwip, u32 xcc_id)
+{
+ u32 rlcg_flag;
+
+ if (amdgpu_device_skip_hw_access(adev))
+ return;
+
+ if (!amdgpu_sriov_runtime(adev) &&
+ amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) {
+ amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag, xcc_id);
+ return;
+ }
+
+ if (acc_flags & AMDGPU_REGS_NO_KIQ)
+ WREG32_NO_KIQ(offset, value);
+ else
+ WREG32(offset, value);
+}
+
+u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
+ u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id)
+{
+ u32 rlcg_flag;
+
+ if (amdgpu_device_skip_hw_access(adev))
+ return 0;
+
+ if (!amdgpu_sriov_runtime(adev) &&
+ amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag))
+ return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag, xcc_id);
+
+ if (acc_flags & AMDGPU_REGS_NO_KIQ)
+ return RREG32_NO_KIQ(offset);
+ else
+ return RREG32(offset);
+}
+
+bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev)
+{
+ bool xnack_mode = true;
+
+ if (amdgpu_sriov_vf(adev) &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2))
+ xnack_mode = false;
+
+ return xnack_mode;
+}
+
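+/*
+ * Translate the host-advertised RAS capability bits from the PF2VF message
+ * into the driver's ras_hw_enabled mask, and flag poison handling as
+ * host-managed when advertised.
+ */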
+bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!amdgpu_sriov_ras_caps_en(adev))
+ return false;
+
+ if (adev->virt.ras_en_caps.bits.block_umc)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__UMC);
+ if (adev->virt.ras_en_caps.bits.block_sdma)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SDMA);
+ if (adev->virt.ras_en_caps.bits.block_gfx)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__GFX);
+ if (adev->virt.ras_en_caps.bits.block_mmhub)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MMHUB);
+ if (adev->virt.ras_en_caps.bits.block_athub)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__ATHUB);
+ if (adev->virt.ras_en_caps.bits.block_pcie_bif)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__PCIE_BIF);
+ if (adev->virt.ras_en_caps.bits.block_hdp)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__HDP);
+ if (adev->virt.ras_en_caps.bits.block_xgmi_wafl)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__XGMI_WAFL);
+ if (adev->virt.ras_en_caps.bits.block_df)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__DF);
+ if (adev->virt.ras_en_caps.bits.block_smn)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SMN);
+ if (adev->virt.ras_en_caps.bits.block_sem)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__SEM);
+ if (adev->virt.ras_en_caps.bits.block_mp0)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP0);
+ if (adev->virt.ras_en_caps.bits.block_mp1)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MP1);
+ if (adev->virt.ras_en_caps.bits.block_fuse)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__FUSE);
+ if (adev->virt.ras_en_caps.bits.block_mca)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MCA);
+ if (adev->virt.ras_en_caps.bits.block_vcn)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__VCN);
+ if (adev->virt.ras_en_caps.bits.block_jpeg)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__JPEG);
+ if (adev->virt.ras_en_caps.bits.block_ih)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__IH);
+ if (adev->virt.ras_en_caps.bits.block_mpio)
+ adev->ras_hw_enabled |= BIT(AMDGPU_RAS_BLOCK__MPIO);
+
+ if (adev->virt.ras_en_caps.bits.poison_propogation_mode)
+ con->poison_supported = true; /* Poison is handled by host */
+
+ return true;
+}
+
+static inline enum amd_sriov_ras_telemetry_gpu_block
+amdgpu_ras_block_to_sriov(struct amdgpu_device *adev, enum amdgpu_ras_block block) {
+ switch (block) {
+ case AMDGPU_RAS_BLOCK__UMC:
+ return RAS_TELEMETRY_GPU_BLOCK_UMC;
+ case AMDGPU_RAS_BLOCK__SDMA:
+ return RAS_TELEMETRY_GPU_BLOCK_SDMA;
+ case AMDGPU_RAS_BLOCK__GFX:
+ return RAS_TELEMETRY_GPU_BLOCK_GFX;
+ case AMDGPU_RAS_BLOCK__MMHUB:
+ return RAS_TELEMETRY_GPU_BLOCK_MMHUB;
+ case AMDGPU_RAS_BLOCK__ATHUB:
+ return RAS_TELEMETRY_GPU_BLOCK_ATHUB;
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
+ return RAS_TELEMETRY_GPU_BLOCK_PCIE_BIF;
+ case AMDGPU_RAS_BLOCK__HDP:
+ return RAS_TELEMETRY_GPU_BLOCK_HDP;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ return RAS_TELEMETRY_GPU_BLOCK_XGMI_WAFL;
+ case AMDGPU_RAS_BLOCK__DF:
+ return RAS_TELEMETRY_GPU_BLOCK_DF;
+ case AMDGPU_RAS_BLOCK__SMN:
+ return RAS_TELEMETRY_GPU_BLOCK_SMN;
+ case AMDGPU_RAS_BLOCK__SEM:
+ return RAS_TELEMETRY_GPU_BLOCK_SEM;
+ case AMDGPU_RAS_BLOCK__MP0:
+ return RAS_TELEMETRY_GPU_BLOCK_MP0;
+ case AMDGPU_RAS_BLOCK__MP1:
+ return RAS_TELEMETRY_GPU_BLOCK_MP1;
+ case AMDGPU_RAS_BLOCK__FUSE:
+ return RAS_TELEMETRY_GPU_BLOCK_FUSE;
+ case AMDGPU_RAS_BLOCK__MCA:
+ return RAS_TELEMETRY_GPU_BLOCK_MCA;
+ case AMDGPU_RAS_BLOCK__VCN:
+ return RAS_TELEMETRY_GPU_BLOCK_VCN;
+ case AMDGPU_RAS_BLOCK__JPEG:
+ return RAS_TELEMETRY_GPU_BLOCK_JPEG;
+ case AMDGPU_RAS_BLOCK__IH:
+ return RAS_TELEMETRY_GPU_BLOCK_IH;
+ case AMDGPU_RAS_BLOCK__MPIO:
+ return RAS_TELEMETRY_GPU_BLOCK_MPIO;
+ default:
+ DRM_WARN_ONCE("Unsupported SRIOV RAS telemetry block 0x%x\n",
+ block);
+ return RAS_TELEMETRY_GPU_BLOCK_COUNT;
+ }
+}
+
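+/*
+ * Copy the host-provided RAS error counts out of the shared telemetry buffer
+ * after verifying size and checksum, so later queries can be served from the
+ * local cache even when host access is unavailable.
+ */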
+static int amdgpu_virt_cache_host_error_counts(struct amdgpu_device *adev,
+ struct amdsriov_ras_telemetry *host_telemetry)
+{
+ struct amd_sriov_ras_telemetry_error_count *tmp = NULL;
+ uint32_t checksum, used_size;
+
+ checksum = host_telemetry->header.checksum;
+ used_size = host_telemetry->header.used_size;
+
+ if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ return 0;
+
+ tmp = kmemdup(&host_telemetry->body.error_count, used_size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
+ goto out;
+
+ memcpy(&adev->virt.count_cache, tmp,
+ min(used_size, sizeof(adev->virt.count_cache)));
+out:
+ kfree(tmp);
+
+ return 0;
+}
+
+static int amdgpu_virt_req_ras_err_count_internal(struct amdgpu_device *adev, bool force_update)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (!virt->ops || !virt->ops->req_ras_err_count)
+ return -EOPNOTSUPP;
+
+ /* Host allows 15 ras telemetry requests per 60 seconds. After which, the Host
+ * will ignore incoming guest messages. Ratelimit the guest messages to
+ * prevent guest self DOS.
+ */
+ if (__ratelimit(&virt->ras.ras_error_cnt_rs) || force_update) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
+ if (!virt->ops->req_ras_err_count(adev))
+ amdgpu_virt_cache_host_error_counts(adev,
+ virt->fw_reserve.ras_telemetry);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
+ }
+
+ return 0;
+}
+
+/* Bypass ACA interface and query ECC counts directly from host */
+int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
+ struct ras_err_data *err_data)
+{
+ enum amd_sriov_ras_telemetry_gpu_block sriov_block;
+
+ sriov_block = amdgpu_ras_block_to_sriov(adev, block);
+
+ if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT ||
+ !amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block))
+ return -EOPNOTSUPP;
+
+ /* Host Access may be lost during reset, just return last cached data. */
+ if (down_read_trylock(&adev->reset_domain->sem)) {
+ amdgpu_virt_req_ras_err_count_internal(adev, false);
+ up_read(&adev->reset_domain->sem);
+ }
+
+ err_data->ue_count = adev->virt.count_cache.block[sriov_block].ue_count;
+ err_data->ce_count = adev->virt.count_cache.block[sriov_block].ce_count;
+ err_data->de_count = adev->virt.count_cache.block[sriov_block].de_count;
+
+ return 0;
+}
+
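+/*
+ * Validate a host CPER telemetry dump and append each CPER record to the
+ * guest CPER ring, advancing the cached read pointer past the records that
+ * were consumed.
+ */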
+static int
+amdgpu_virt_write_cpers_to_ring(struct amdgpu_device *adev,
+ struct amdsriov_ras_telemetry *host_telemetry,
+ u32 *more)
+{
+ struct amd_sriov_ras_cper_dump *cper_dump = NULL;
+ struct cper_hdr *entry = NULL;
+ struct amdgpu_ring *ring = &adev->cper.ring_buf;
+ uint32_t checksum, used_size, i;
+ int ret = 0;
+
+ checksum = host_telemetry->header.checksum;
+ used_size = host_telemetry->header.used_size;
+
+ if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ return -EINVAL;
+
+ cper_dump = kmemdup(&host_telemetry->body.cper_dump, used_size, GFP_KERNEL);
+ if (!cper_dump)
+ return -ENOMEM;
+
+ if (checksum != amd_sriov_msg_checksum(cper_dump, used_size, 0, 0)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ *more = cper_dump->more;
+
+ if (cper_dump->wptr < adev->virt.ras.cper_rptr) {
+ dev_warn(
+ adev->dev,
+ "guest specified rptr that was too high! guest rptr: 0x%llx, host wptr: 0x%llx\n",
+ adev->virt.ras.cper_rptr, cper_dump->wptr);
+
+ adev->virt.ras.cper_rptr = cper_dump->wptr;
+ goto out;
+ }
+
+ entry = (struct cper_hdr *)&cper_dump->buf[0];
+
+ for (i = 0; i < cper_dump->count; i++) {
+ amdgpu_cper_ring_write(ring, entry, entry->record_length);
+ entry = (struct cper_hdr *)((char *)entry +
+ entry->record_length);
+ }
+
+ if (cper_dump->overflow_count)
+ dev_warn(adev->dev,
+ "host reported CPER overflow of 0x%llx entries!\n",
+ cper_dump->overflow_count);
+
+ adev->virt.ras.cper_rptr = cper_dump->wptr;
+out:
+ kfree(cper_dump);
+
+ return ret;
+}
+
+static int amdgpu_virt_req_ras_cper_dump_internal(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int ret = 0;
+ uint32_t more = 0;
+
+ if (!virt->ops || !virt->ops->req_ras_cper_dump)
+ return -EOPNOTSUPP;
+
+ do {
+ if (!virt->ops->req_ras_cper_dump(adev, virt->ras.cper_rptr))
+ ret = amdgpu_virt_write_cpers_to_ring(
+ adev, virt->fw_reserve.ras_telemetry, &more);
+ else
+ ret = 0;
+ } while (more && !ret);
+
+ return ret;
+}
+
+int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int ret = 0;
+
+ if (!amdgpu_sriov_ras_cper_en(adev))
+ return -EOPNOTSUPP;
+
+ if ((__ratelimit(&virt->ras.ras_cper_dump_rs) || force_update) &&
+ down_read_trylock(&adev->reset_domain->sem)) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
+ ret = amdgpu_virt_req_ras_cper_dump_internal(adev);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
+ up_read(&adev->reset_domain->sem);
+ }
+
+ return ret;
+}
+
+int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev)
+{
+ unsigned long ue_count, ce_count;
+
+ if (amdgpu_sriov_ras_telemetry_en(adev)) {
+ amdgpu_virt_req_ras_err_count_internal(adev, true);
+ amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL);
+ }
+
+ return 0;
+}
+
+bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
+{
+ enum amd_sriov_ras_telemetry_gpu_block sriov_block;
+
+ sriov_block = amdgpu_ras_block_to_sriov(adev, block);
+
+ if (sriov_block >= RAS_TELEMETRY_GPU_BLOCK_COUNT ||
+ !amdgpu_sriov_ras_telemetry_block_en(adev, sriov_block))
+ return false;
+
+ return true;
+}
+
+/*
+ * amdgpu_virt_request_bad_pages() - request bad pages
+ * @adev: amdgpu device.
+ * Send command to GPU hypervisor to write new bad pages into the shared PF2VF region
+ */
+void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (virt->ops && virt->ops->req_bad_pages)
+ virt->ops->req_bad_pages(adev);
+}
+
+static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev,
+ struct amdsriov_ras_telemetry *host_telemetry,
+ bool *hit)
+{
+ struct amd_sriov_ras_chk_criti *tmp = NULL;
+ uint32_t checksum, used_size;
+
+ checksum = host_telemetry->header.checksum;
+ used_size = host_telemetry->header.used_size;
+
+ if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10))
+ return 0;
+
+ tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0))
+ goto out;
+
+ if (hit)
+ *hit = tmp->hit ? true : false;
+
+out:
+ kfree(tmp);
+
+ return 0;
+}
+
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+ int r = -EPERM;
+
+ if (!virt->ops || !virt->ops->req_ras_chk_criti)
+ return -EOPNOTSUPP;
+
+ /* Host allows 15 ras telemetry requests per 60 seconds. After which, the Host
+ * will ignore incoming guest messages. Ratelimit the guest messages to
+ * prevent guest self DOS.
+ */
+ if (__ratelimit(&virt->ras.ras_chk_criti_rs)) {
+ mutex_lock(&virt->ras.ras_telemetry_mutex);
+ if (!virt->ops->req_ras_chk_criti(adev, addr))
+ r = amdgpu_virt_cache_chk_criti_hit(
+ adev, virt->fw_reserve.ras_telemetry, hit);
+ mutex_unlock(&virt->ras.ras_telemetry_mutex);
+ }
+
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index a8ed162cc0bc..d1172c8e58c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -24,11 +24,41 @@
#ifndef AMDGPU_VIRT_H
#define AMDGPU_VIRT_H
+#include "amdgv_sriovmsg.h"
+
#define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS (1 << 0) /* vBIOS is sr-iov ready */
#define AMDGPU_SRIOV_CAPS_ENABLE_IOV (1 << 1) /* sr-iov is enabled on this GPU */
#define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */
#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is passed through to the VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */
+#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5) /* MMIO write access is not allowed in sriov runtime */
+
+/* flags for indirect register access path supported by rlcg for sriov */
+#define AMDGPU_RLCG_GC_WRITE_LEGACY (0x8 << 28)
+#define AMDGPU_RLCG_GC_WRITE (0x0 << 28)
+#define AMDGPU_RLCG_GC_READ (0x1 << 28)
+#define AMDGPU_RLCG_MMHUB_WRITE (0x2 << 28)
+
+/* error code for indirect register access path supported by rlcg for sriov */
+#define AMDGPU_RLCG_VFGATE_DISABLED 0x4000000
+#define AMDGPU_RLCG_WRONG_OPERATION_TYPE 0x2000000
+#define AMDGPU_RLCG_REG_NOT_IN_RANGE 0x1000000
+
+#define AMDGPU_RLCG_SCRATCH1_ADDRESS_MASK 0xFFFFF
+#define AMDGPU_RLCG_SCRATCH1_ERROR_MASK 0xF000000
+
+/* all asic after AI use this offset */
+#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
+/* tonga/fiji use this offset */
+#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503
+
+#define AMDGPU_VF2PF_UPDATE_MAX_RETRY_LIMIT 2
+
+enum amdgpu_sriov_vf_mode {
+ SRIOV_VF_MODE_BARE_METAL = 0,
+ SRIOV_VF_MODE_ONE_VF,
+ SRIOV_VF_MODE_MULTI_VF,
+};
struct amdgpu_mm_table {
struct amdgpu_bo *bo;
@@ -36,33 +66,257 @@ struct amdgpu_mm_table {
uint64_t gpu_addr;
};
+#define AMDGPU_VF_ERROR_ENTRY_SIZE 16
+
+/* struct error_entry - amdgpu VF error information. */
+struct amdgpu_vf_error_buffer {
+ struct mutex lock;
+ int read_count;
+ int write_count;
+ uint16_t code[AMDGPU_VF_ERROR_ENTRY_SIZE];
+ uint16_t flags[AMDGPU_VF_ERROR_ENTRY_SIZE];
+ uint64_t data[AMDGPU_VF_ERROR_ENTRY_SIZE];
+};
+
+enum idh_request;
+
/**
* struct amdgpu_virt_ops - amdgpu device virt operations
*/
struct amdgpu_virt_ops {
int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
+ int (*req_init_data)(struct amdgpu_device *adev);
int (*reset_gpu)(struct amdgpu_device *adev);
+ void (*ready_to_reset)(struct amdgpu_device *adev);
+ int (*wait_reset)(struct amdgpu_device *adev);
+ void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req,
+ u32 data1, u32 data2, u32 data3);
+ void (*ras_poison_handler)(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
+ bool (*rcvd_ras_intr)(struct amdgpu_device *adev);
+ int (*req_ras_err_count)(struct amdgpu_device *adev);
+ int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr);
+ int (*req_bad_pages)(struct amdgpu_device *adev);
+ int (*req_ras_chk_criti)(struct amdgpu_device *adev, u64 addr);
+};
+
+/*
+ * Firmware Reserve Frame buffer
+ */
+struct amdgpu_virt_fw_reserve {
+ struct amd_sriov_msg_pf2vf_info_header *p_pf2vf;
+ struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
+ void *ras_telemetry;
+ unsigned int checksum_key;
+};
+
+/*
+ * Legacy GIM header
+ *
+ * Definitions shared between PF and VF
+ * Structures forcibly aligned to 4 to keep the same style as PF.
+ */
+#define AMDGIM_DATAEXCHANGE_OFFSET (64 * 1024)
+
+#define AMDGIM_GET_STRUCTURE_RESERVED_SIZE(total, u8, u16, u32, u64) \
+ (total - (((u8)+3) / 4 + ((u16)+1) / 2 + (u32) + (u64)*2))
+
+enum AMDGIM_FEATURE_FLAG {
+ /* GIM supports feature of Error log collecting */
+ AMDGIM_FEATURE_ERROR_LOG_COLLECT = 0x1,
+ /* GIM supports feature of loading uCodes */
+ AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
+ /* VRAM LOST by GIM */
+ AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+ /* MM bandwidth */
+ AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8,
+ /* PP ONE VF MODE in GIM */
+ AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
+ /* Indirect Reg Access enabled */
+ AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5),
+ /* AV1 Support MODE*/
+ AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6),
+ /* VCN RB decouple */
+ AMDGIM_FEATURE_VCN_RB_DECOUPLE = (1 << 7),
+ /* MES info */
+ AMDGIM_FEATURE_MES_INFO_ENABLE = (1 << 8),
+ AMDGIM_FEATURE_RAS_CAPS = (1 << 9),
+ AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10),
+ AMDGIM_FEATURE_RAS_CPER = (1 << 11),
+};
+
+enum AMDGIM_REG_ACCESS_FLAG {
+ /* Use PSP to program IH_RB_CNTL */
+ AMDGIM_FEATURE_IH_REG_PSP_EN = (1 << 0),
+ /* Use RLC to program MMHUB regs */
+ AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1),
+ /* Use RLC to program GC regs */
+ AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2),
+ /* Use PSP to program L1_TLB_CNTL */
+ AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN = (1 << 3),
+ /* Use RLCG to program SQ_CONFIG1 */
+ AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG = (1 << 4),
+};
+
+struct amdgim_pf2vf_info_v1 {
+ /* header contains size and version */
+ struct amd_sriov_msg_pf2vf_info_header header;
+ /* max_width * max_height */
+ unsigned int uvd_enc_max_pixels_count;
+ /* 16x16 pixels/sec, codec independent */
+ unsigned int uvd_enc_max_bandwidth;
+ /* max_width * max_height */
+ unsigned int vce_enc_max_pixels_count;
+ /* 16x16 pixels/sec, codec independent */
+ unsigned int vce_enc_max_bandwidth;
+ /* MEC FW position in kb from the start of visible frame buffer */
+ unsigned int mecfw_kboffset;
+ /* The features flags of the GIM driver supports. */
+ unsigned int feature_flags;
+ /* use private key from mailbox 2 to create checksum */
+ unsigned int checksum;
+} __aligned(4);
+
+struct amdgim_vf2pf_info_v1 {
+ /* header contains size and version */
+ struct amd_sriov_msg_vf2pf_info_header header;
+ /* driver version */
+ char driver_version[64];
+ /* driver certification, 1=WHQL, 0=None */
+ unsigned int driver_cert;
+ /* guest OS type and version: need a define */
+ unsigned int os_info;
+ /* in the unit of 1M */
+ unsigned int fb_usage;
+ /* guest gfx engine usage percentage */
+ unsigned int gfx_usage;
+ /* guest gfx engine health percentage */
+ unsigned int gfx_health;
+ /* guest compute engine usage percentage */
+ unsigned int compute_usage;
+ /* guest compute engine health percentage */
+ unsigned int compute_health;
+ /* guest vce engine usage percentage. 0xffff means N/A. */
+ unsigned int vce_enc_usage;
+ /* guest vce engine health percentage. 0xffff means N/A. */
+ unsigned int vce_enc_health;
+ /* guest uvd engine usage percentage. 0xffff means N/A. */
+ unsigned int uvd_enc_usage;
+ /* guest uvd engine health percentage. 0xffff means N/A. */
+ unsigned int uvd_enc_health;
+ unsigned int checksum;
+} __aligned(4);
+
+struct amdgim_vf2pf_info_v2 {
+ /* header contains size and version */
+ struct amd_sriov_msg_vf2pf_info_header header;
+ uint32_t checksum;
+ /* driver version */
+ uint8_t driver_version[64];
+ /* driver certification, 1=WHQL, 0=None */
+ uint32_t driver_cert;
+ /* guest OS type and version: need a define */
+ uint32_t os_info;
+ /* in the unit of 1M */
+ uint32_t fb_usage;
+ /* guest gfx engine usage percentage */
+ uint32_t gfx_usage;
+ /* guest gfx engine health percentage */
+ uint32_t gfx_health;
+ /* guest compute engine usage percentage */
+ uint32_t compute_usage;
+ /* guest compute engine health percentage */
+ uint32_t compute_health;
+ /* guest vce engine usage percentage. 0xffff means N/A. */
+ uint32_t vce_enc_usage;
+ /* guest vce engine health percentage. 0xffff means N/A. */
+ uint32_t vce_enc_health;
+ /* guest uvd engine usage percentage. 0xffff means N/A. */
+ uint32_t uvd_enc_usage;
+ /* guest uvd engine health percentage. 0xffff means N/A. */
+ uint32_t uvd_enc_health;
+ uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
+} __aligned(4);
+
+struct amdgpu_virt_ras_err_handler_data {
+ /* point to bad page records array */
+ struct eeprom_table_record *bps;
+ /* point to reserved bo array */
+ struct amdgpu_bo **bps_bo;
+ /* the count of entries */
+ int count;
+ /* last reserved entry's index + 1 */
+ int last_reserved;
};
+struct amdgpu_virt_ras {
+ struct ratelimit_state ras_error_cnt_rs;
+ struct ratelimit_state ras_cper_dump_rs;
+ struct ratelimit_state ras_chk_criti_rs;
+ struct mutex ras_telemetry_mutex;
+ uint64_t cper_rptr;
+};
+
+#define AMDGPU_VIRT_CAPS_LIST(X) X(AMDGPU_VIRT_CAP_POWER_LIMIT)
+
+DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST);
+
/* GPU virtualization */
struct amdgpu_virt {
uint32_t caps;
struct amdgpu_bo *csa_obj;
- uint64_t csa_vmid0_addr;
+ void *csa_cpu_addr;
bool chained_ib_support;
uint32_t reg_val_offs;
- struct mutex lock_kiq;
- struct mutex lock_reset;
struct amdgpu_irq_src ack_irq;
struct amdgpu_irq_src rcv_irq;
+
struct work_struct flr_work;
+ struct work_struct req_bad_pages_work;
+ struct work_struct handle_bad_pages_work;
+
struct amdgpu_mm_table mm_table;
const struct amdgpu_virt_ops *ops;
+ struct amdgpu_vf_error_buffer vf_errors;
+ struct amdgpu_virt_fw_reserve fw_reserve;
+ struct amdgpu_virt_caps virt_caps;
+ uint32_t gim_feature;
+ uint32_t reg_access_mode;
+ int req_init_data_ver;
+ bool tdr_debug;
+ struct amdgpu_virt_ras_err_handler_data *virt_eh_data;
+ bool ras_init_done;
+ uint32_t reg_access;
+
+ /* vf2pf message */
+ struct delayed_work vf2pf_work;
+ uint32_t vf2pf_update_interval_ms;
+ int vf2pf_update_retry_cnt;
+
+ /* multimedia bandwidth config */
+ bool is_mm_bw_enabled;
+ uint32_t decode_max_dimension_pixels;
+ uint32_t decode_max_frame_pixels;
+ uint32_t encode_max_dimension_pixels;
+ uint32_t encode_max_frame_pixels;
+
+ /* the ucode id to signal the autoload */
+ uint32_t autoload_ucode_id;
+
+ /* Spinlock to protect access to the RLCG register interface */
+ spinlock_t rlcg_reg_lock;
+
+ union amd_sriov_ras_caps ras_en_caps;
+ union amd_sriov_ras_caps ras_telemetry_en_caps;
+ struct amdgpu_virt_ras ras;
+ struct amd_sriov_ras_telemetry_error_count count_cache;
+
+ /* hibernate and resume with different VF feature for xgmi enabled system */
+ bool is_xgmi_node_migrate_enabled;
};
-#define AMDGPU_CSA_SIZE (8 * 1024)
-#define AMDGPU_CSA_VADDR (AMDGPU_VA_RESERVED_SIZE - AMDGPU_CSA_SIZE)
+struct amdgpu_video_codec_info;
#define amdgpu_sriov_enabled(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_ENABLE_IOV)
@@ -76,29 +330,130 @@ struct amdgpu_virt {
#define amdgpu_sriov_runtime(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)
+#define amdgpu_sriov_fullaccess(adev) \
+(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))
+
+#define amdgpu_sriov_reg_indirect_en(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.gim_feature & (AMDGIM_FEATURE_INDIRECT_REG_ACCESS)))
+
+#define amdgpu_sriov_reg_indirect_ih(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_IH_REG_PSP_EN)))
+
+#define amdgpu_sriov_reg_indirect_mmhub(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_MMHUB_REG_RLC_EN)))
+
+#define amdgpu_sriov_reg_indirect_gc(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN)))
+
+#define amdgpu_sriov_reg_indirect_l1_tlb_cntl(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN)))
+
+#define amdgpu_sriov_rlcg_error_report_enabled(adev) \
+ (amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev))
+
+#define amdgpu_sriov_reg_access_sq_config(adev) \
+(amdgpu_sriov_vf((adev)) && \
+ ((adev)->virt.reg_access & (AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG)))
+
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
+#define amdgpu_sriov_vf_mmio_access_protection(adev) \
+((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT)
+
+#define amdgpu_sriov_ras_caps_en(adev) \
+((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CAPS)
+
+#define amdgpu_sriov_ras_telemetry_en(adev) \
+(((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_TELEMETRY) && (adev)->virt.fw_reserve.ras_telemetry)
+
+#define amdgpu_sriov_ras_telemetry_block_en(adev, sriov_blk) \
+(amdgpu_sriov_ras_telemetry_en((adev)) && (adev)->virt.ras_telemetry_en_caps.all & BIT(sriov_blk))
+
+#define amdgpu_sriov_ras_cper_en(adev) \
+((adev)->virt.gim_feature & AMDGIM_FEATURE_RAS_CPER)
+
static inline bool is_virtual_machine(void)
{
-#ifdef CONFIG_X86
+#if defined(CONFIG_X86)
return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#elif defined(CONFIG_ARM64)
+ return !is_kernel_in_hyp_mode();
#else
return false;
#endif
}
-struct amdgpu_vm;
-int amdgpu_allocate_static_csa(struct amdgpu_device *adev);
-int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+#define amdgpu_sriov_is_pp_one_vf(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
+#define amdgpu_sriov_multi_vf_mode(adev) \
+ (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+#define amdgpu_sriov_is_debug(adev) \
+ ((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug)
+#define amdgpu_sriov_is_normal(adev) \
+ ((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug))
+#define amdgpu_sriov_is_av1_support(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT)
+#define amdgpu_sriov_is_vcn_rb_decouple(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE)
+#define amdgpu_sriov_is_mes_info_enable(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_MES_INFO_ENABLE)
+
+#define amdgpu_virt_xgmi_migrate_enabled(adev) \
+ ((adev)->virt.is_xgmi_node_migrate_enabled && (adev)->gmc.xgmi.node_segment_size != 0)
+
+bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
-uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
-void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
-int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, bool voluntary);
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
+void amdgpu_virt_ready_to_reset(struct amdgpu_device *adev);
+int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
+bool amdgpu_virt_rcvd_ras_interrupt(struct amdgpu_device *adev);
+void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_virt_exchange_data(struct amdgpu_device *adev);
+void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
+void amdgpu_virt_init(struct amdgpu_device *adev);
+
+bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
+int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
+void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);
+
+enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev);
+void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
+ struct amdgpu_video_codec_info *encode, uint32_t encode_array_size,
+ struct amdgpu_video_codec_info *decode, uint32_t decode_array_size);
+void amdgpu_sriov_wreg(struct amdgpu_device *adev,
+ u32 offset, u32 value,
+ u32 acc_flags, u32 hwip, u32 xcc_id);
+u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
+ u32 offset, u32 acc_flags, u32 hwip, u32 xcc_id);
+bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev,
+ uint32_t ucode_id);
+void amdgpu_virt_pre_reset(struct amdgpu_device *adev);
+void amdgpu_virt_post_reset(struct amdgpu_device *adev);
+bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev);
+bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
+ u32 acc_flags, u32 hwip,
+ bool write, u32 *rlcg_flag);
+u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id);
+bool amdgpu_virt_get_ras_capability(struct amdgpu_device *adev);
+int amdgpu_virt_req_ras_err_count(struct amdgpu_device *adev, enum amdgpu_ras_block block,
+ struct ras_err_data *err_data);
+int amdgpu_virt_req_ras_cper_dump(struct amdgpu_device *adev, bool force_update);
+int amdgpu_virt_ras_telemetry_post_reset(struct amdgpu_device *adev);
+bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
+void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev);
+int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
new file mode 100644
index 000000000000..79bad9cbe2ab
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -0,0 +1,659 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "amdgpu.h"
+#ifdef CONFIG_DRM_AMDGPU_SI
+#include "dce_v6_0.h"
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+#include "dce_v8_0.h"
+#endif
+#include "dce_v10_0.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+#include "amdgpu_vkms.h"
+#include "amdgpu_display.h"
+#include "atom.h"
+#include "amdgpu_irq.h"
+
+/**
+ * DOC: amdgpu_vkms
+ *
+ * The amdgpu vkms interface provides a virtual KMS interface for several use
+ * cases: devices without display hardware, platforms where the actual display
+ * hardware is not useful (e.g., servers), SR-IOV virtual functions, device
+ * emulation/simulation, and device bring up prior to display hardware being
+ * usable. We previously emulated a legacy KMS interface, but there was a desire
+ * to move to the atomic KMS interface. The vkms driver did everything we
+ * needed, but we wanted KMS support natively in the driver without buffer
+ * sharing and the ability to support an instance of VKMS per device. We first
+ * looked at splitting vkms into a stub driver and a helper module that other
+ * drivers could use to implement a virtual display, but this strategy ended up
+ * being messy due to driver specific callbacks needed for buffer management.
+ * Ultimately, it proved easier to import the vkms code as it mostly used core
+ * drm helpers anyway.
+ */
+
+static const u32 amdgpu_vkms_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
+{
+ struct amdgpu_crtc *amdgpu_crtc = container_of(timer, struct amdgpu_crtc, vblank_timer);
+ struct drm_crtc *crtc = &amdgpu_crtc->base;
+ struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
+ u64 ret_overrun;
+ bool ret;
+
+ ret_overrun = hrtimer_forward_now(&amdgpu_crtc->vblank_timer,
+ output->period_ns);
+ if (ret_overrun != 1)
+ DRM_WARN("%s: vblank timer overrun\n", __func__);
+
+ ret = drm_crtc_handle_vblank(crtc);
+ /* Don't queue timer again when vblank is disabled. */
+ if (!ret)
+ return HRTIMER_NORESTART;
+
+ return HRTIMER_RESTART;
+}
+
+static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ drm_calc_timestamping_constants(crtc, &crtc->mode);
+
+ out->period_ns = ktime_set(0, vblank->framedur_ns);
+ hrtimer_start(&amdgpu_crtc->vblank_timer, out->period_ns, HRTIMER_MODE_REL);
+
+ return 0;
+}
+
+static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
+{
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer);
+}
+
+static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq)
+{
+ struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
+ struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (!READ_ONCE(vblank->enabled)) {
+ *vblank_time = ktime_get();
+ return true;
+ }
+
+ *vblank_time = READ_ONCE(amdgpu_crtc->vblank_timer.node.expires);
+
+ if (WARN_ON(*vblank_time == vblank->time))
+ return true;
+
+ /*
+ * To prevent races we roll the hrtimer forward before we do any
+ * interrupt processing - this is how real hw works (the interrupt is
+ * only generated after all the vblank registers are updated) and what
+ * the vblank core expects. Therefore we need to always correct the
+ * timestamp by one frame.
+ */
+ *vblank_time -= output->period_ns;
+
+ return true;
+}
+
+static const struct drm_crtc_funcs amdgpu_vkms_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = drm_crtc_cleanup,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = amdgpu_vkms_enable_vblank,
+ .disable_vblank = amdgpu_vkms_disable_vblank,
+ .get_vblank_timestamp = amdgpu_vkms_get_vblank_timestamp,
+};
+
+static void amdgpu_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ drm_crtc_vblank_on(crtc);
+}
+
+static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ drm_crtc_vblank_off(crtc);
+}
+
+static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ unsigned long flags;
+ if (crtc->state->event) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+ if (drm_crtc_vblank_get(crtc) != 0)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ else
+ drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ crtc->state->event = NULL;
+ }
+}
+
+static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = {
+ .atomic_flush = amdgpu_vkms_crtc_atomic_flush,
+ .atomic_enable = amdgpu_vkms_crtc_atomic_enable,
+ .atomic_disable = amdgpu_vkms_crtc_atomic_disable,
+};
+
+static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_plane *primary, struct drm_plane *cursor)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ int ret;
+
+ ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
+ &amdgpu_vkms_crtc_funcs, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to init CRTC\n");
+ return ret;
+ }
+
+ drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs);
+
+ amdgpu_crtc->crtc_id = drm_crtc_index(crtc);
+ adev->mode_info.crtcs[drm_crtc_index(crtc)] = amdgpu_crtc;
+
+ amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
+ amdgpu_crtc->encoder = NULL;
+ amdgpu_crtc->connector = NULL;
+ amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
+
+ hrtimer_setup(&amdgpu_crtc->vblank_timer, &amdgpu_vkms_vblank_simulate, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+
+ return ret;
+}
+
+static const struct drm_connector_funcs amdgpu_vkms_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *mode = NULL;
+ unsigned i;
+ static const struct mode_size {
+ int w;
+ int h;
+ } common_modes[] = {
+ { 640, 480},
+ { 720, 480},
+ { 800, 600},
+ { 848, 480},
+ {1024, 768},
+ {1152, 768},
+ {1280, 720},
+ {1280, 800},
+ {1280, 854},
+ {1280, 960},
+ {1280, 1024},
+ {1440, 900},
+ {1400, 1050},
+ {1680, 1050},
+ {1600, 1200},
+ {1920, 1080},
+ {1920, 1200},
+ {2560, 1440},
+ {4096, 3112},
+ {3656, 2664},
+ {3840, 2160},
+ {4096, 2160},
+ };
+
+ for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+ mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+ if (!mode)
+ continue;
+ drm_mode_probed_add(connector, mode);
+ }
+
+ drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+
+ return ARRAY_SIZE(common_modes);
+}
+
+static const struct drm_connector_helper_funcs amdgpu_vkms_conn_helper_funcs = {
+ .get_modes = amdgpu_vkms_conn_get_modes,
+};
+
+static const struct drm_plane_funcs amdgpu_vkms_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static void amdgpu_vkms_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *old_state)
+{
+ return;
+}
+
+static int amdgpu_vkms_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state,
+ new_plane_state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, true);
+ if (ret != 0)
+ return ret;
+
+ /* for now primary plane must be visible and full screen */
+ if (!new_plane_state->visible)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct amdgpu_framebuffer *afb;
+ struct drm_gem_object *obj;
+ struct amdgpu_device *adev;
+ struct amdgpu_bo *rbo;
+ uint32_t domain;
+ int r;
+
+ if (!new_state->fb) {
+ DRM_DEBUG_KMS("No FB bound\n");
+ return 0;
+ }
+ afb = to_amdgpu_framebuffer(new_state->fb);
+
+ obj = drm_gem_fb_get_obj(new_state->fb, 0);
+ if (!obj) {
+ DRM_ERROR("Failed to get obj from framebuffer\n");
+ return -EINVAL;
+ }
+
+ rbo = gem_to_amdgpu_bo(obj);
+ adev = amdgpu_ttm_adev(rbo->tbo.bdev);
+
+ r = amdgpu_bo_reserve(rbo, true);
+ if (r) {
+ dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+ return r;
+ }
+
+ r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
+ if (r) {
+ dev_err(adev->dev, "allocating fence slot failed (%d)\n", r);
+ goto error_unlock;
+ }
+
+ if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ domain = amdgpu_display_supported_domains(adev, rbo->flags);
+ else
+ domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+ rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ r = amdgpu_bo_pin(rbo, domain);
+ if (unlikely(r != 0)) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+ goto error_unlock;
+ }
+
+ r = amdgpu_ttm_alloc_gart(&rbo->tbo);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("%p bind failed\n", rbo);
+ goto error_unpin;
+ }
+
+ amdgpu_bo_unreserve(rbo);
+
+ afb->address = amdgpu_bo_gpu_offset(rbo);
+
+ amdgpu_bo_ref(rbo);
+
+ return 0;
+
+error_unpin:
+ amdgpu_bo_unpin(rbo);
+
+error_unlock:
+ amdgpu_bo_unreserve(rbo);
+ return r;
+}
+
+static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct amdgpu_bo *rbo;
+ struct drm_gem_object *obj;
+ int r;
+
+ if (!old_state->fb)
+ return;
+
+ obj = drm_gem_fb_get_obj(old_state->fb, 0);
+ if (!obj) {
+ DRM_ERROR("Failed to get obj from framebuffer\n");
+ return;
+ }
+
+ rbo = gem_to_amdgpu_bo(obj);
+ r = amdgpu_bo_reserve(rbo, false);
+ if (unlikely(r)) {
+ DRM_ERROR("failed to reserve rbo before unpin\n");
+ return;
+ }
+
+ amdgpu_bo_unpin(rbo);
+ amdgpu_bo_unreserve(rbo);
+ amdgpu_bo_unref(&rbo);
+}
+
+static const struct drm_plane_helper_funcs amdgpu_vkms_primary_helper_funcs = {
+ .atomic_update = amdgpu_vkms_plane_atomic_update,
+ .atomic_check = amdgpu_vkms_plane_atomic_check,
+ .prepare_fb = amdgpu_vkms_prepare_fb,
+ .cleanup_fb = amdgpu_vkms_cleanup_fb,
+};
+
+static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
+ enum drm_plane_type type,
+ int index)
+{
+ struct drm_plane *plane;
+ int ret;
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_universal_plane_init(dev, plane, 1 << index,
+ &amdgpu_vkms_plane_funcs,
+ amdgpu_vkms_formats,
+ ARRAY_SIZE(amdgpu_vkms_formats),
+ NULL, type, NULL);
+ if (ret) {
+ kfree(plane);
+ return ERR_PTR(ret);
+ }
+
+ drm_plane_helper_add(plane, &amdgpu_vkms_primary_helper_funcs);
+
+ return plane;
+}
+
+static int amdgpu_vkms_output_init(struct drm_device *dev,
+ struct amdgpu_vkms_output *output, int index)
+{
+ struct drm_connector *connector = &output->connector;
+ struct drm_encoder *encoder = &output->encoder;
+ struct drm_crtc *crtc = &output->crtc.base;
+ struct drm_plane *primary, *cursor = NULL;
+ int ret;
+
+ primary = amdgpu_vkms_plane_init(dev, DRM_PLANE_TYPE_PRIMARY, index);
+ if (IS_ERR(primary))
+ return PTR_ERR(primary);
+
+ ret = amdgpu_vkms_crtc_init(dev, crtc, primary, cursor);
+ if (ret)
+ goto err_crtc;
+
+ ret = drm_connector_init(dev, connector, &amdgpu_vkms_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+ if (ret) {
+ DRM_ERROR("Failed to init connector\n");
+ goto err_connector;
+ }
+
+ drm_connector_helper_add(connector, &amdgpu_vkms_conn_helper_funcs);
+
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
+ if (ret) {
+ DRM_ERROR("Failed to init encoder\n");
+ goto err_encoder;
+ }
+ encoder->possible_crtcs = 1 << index;
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret) {
+ DRM_ERROR("Failed to attach connector to encoder\n");
+ goto err_attach;
+ }
+
+ drm_mode_config_reset(dev);
+
+ return 0;
+
+err_attach:
+ drm_encoder_cleanup(encoder);
+
+err_encoder:
+ drm_connector_cleanup(connector);
+
+err_connector:
+ drm_crtc_cleanup(crtc);
+
+err_crtc:
+ drm_plane_cleanup(primary);
+
+ return ret;
+}
+
+const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = {
+ .fb_create = amdgpu_display_user_framebuffer_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int amdgpu_vkms_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ int r, i;
+ struct amdgpu_device *adev = ip_block->adev;
+
+ adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc,
+ sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
+ if (!adev->amdgpu_vkms_output)
+ return -ENOMEM;
+
+ adev_to_drm(adev)->max_vblank_count = 0;
+
+ adev_to_drm(adev)->mode_config.funcs = &amdgpu_vkms_mode_funcs;
+
+ adev_to_drm(adev)->mode_config.max_width = XRES_MAX;
+ adev_to_drm(adev)->mode_config.max_height = YRES_MAX;
+
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+
+ adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
+
+ r = amdgpu_display_modeset_create_props(adev);
+ if (r)
+ return r;
+
+ /* allocate crtcs, encoders, connectors */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i);
+ if (r)
+ return r;
+ }
+
+ r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
+ if (r)
+ return r;
+
+ drm_kms_helper_poll_init(adev_to_drm(adev));
+
+ adev->mode_info.mode_config_initialized = true;
+ return 0;
+}
+
+static int amdgpu_vkms_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int i = 0;
+
+ for (i = 0; i < adev->mode_info.num_crtc; i++)
+ if (adev->mode_info.crtcs[i])
+ hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
+
+ drm_kms_helper_poll_fini(adev_to_drm(adev));
+ drm_mode_config_cleanup(adev_to_drm(adev));
+
+ adev->mode_info.mode_config_initialized = false;
+
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
+ kfree(adev->amdgpu_vkms_output);
+ return 0;
+}
+
+static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+
+ switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ dce_v6_0_disable_dce(adev);
+ break;
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ dce_v8_0_disable_dce(adev);
+ break;
+#endif
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ dce_v10_0_disable_dce(adev);
+ break;
+ case CHIP_TOPAZ:
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_HAINAN:
+#endif
+ /* no DCE */
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int amdgpu_vkms_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ return 0;
+}
+
+static int amdgpu_vkms_suspend(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = drm_mode_config_helper_suspend(adev_to_drm(adev));
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int amdgpu_vkms_resume(struct amdgpu_ip_block *ip_block)
+{
+ int r;
+
+ r = amdgpu_vkms_hw_init(ip_block);
+ if (r)
+ return r;
+ return drm_mode_config_helper_resume(adev_to_drm(ip_block->adev));
+}
+
+static bool amdgpu_vkms_is_idle(struct amdgpu_ip_block *ip_block)
+{
+ return true;
+}
+
+static int amdgpu_vkms_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int amdgpu_vkms_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
+ .name = "amdgpu_vkms",
+ .sw_init = amdgpu_vkms_sw_init,
+ .sw_fini = amdgpu_vkms_sw_fini,
+ .hw_init = amdgpu_vkms_hw_init,
+ .hw_fini = amdgpu_vkms_hw_fini,
+ .suspend = amdgpu_vkms_suspend,
+ .resume = amdgpu_vkms_resume,
+ .is_idle = amdgpu_vkms_is_idle,
+ .set_clockgating_state = amdgpu_vkms_set_clockgating_state,
+ .set_powergating_state = amdgpu_vkms_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_vkms_ip_funcs,
+};
+
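For context: the IP block defined above is not wired up in this hunk; the SoC init code is expected to register it in place of the real display IP when virtual display is in use. A minimal sketch, assuming the existing amdgpu_device_ip_block_add() helper and the adev->enable_virtual_display flag (neither appears in this hunk):

	if (adev->enable_virtual_display)
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);

Which block is chosen otherwise (the DC/DM path or a legacy DCE block) varies per SoC file.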
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h
new file mode 100644
index 000000000000..4f8722ff37c2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _AMDGPU_VKMS_H_
+#define _AMDGPU_VKMS_H_
+
+#define XRES_DEF 1024
+#define YRES_DEF 768
+
+#define XRES_MAX 16384
+#define YRES_MAX 16384
+
+#define drm_crtc_to_amdgpu_vkms_output(target) \
+ container_of(target, struct amdgpu_vkms_output, crtc.base)
+
+extern const struct amdgpu_ip_block_version amdgpu_vkms_ip_block;
+
+struct amdgpu_vkms_output {
+ struct amdgpu_crtc crtc;
+ struct drm_encoder encoder;
+ struct drm_connector connector;
+ ktime_t period_ns;
+ struct drm_pending_vblank_event *event;
+};
+
+#endif /* _AMDGPU_VKMS_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 07ff3b1514f1..c1a801203949 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,31 +25,66 @@
* Alex Deucher
* Jerome Glisse
*/
+
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
-#include <drm/drmP.h>
+#include <linux/idr.h>
+#include <linux/dma-buf.h>
+
#include <drm/amdgpu_drm.h>
+#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_tt.h>
+#include <drm/drm_exec.h>
#include "amdgpu.h"
+#include "amdgpu_vm.h"
#include "amdgpu_trace.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_gmc.h"
+#include "amdgpu_xgmi.h"
+#include "amdgpu_dma_buf.h"
+#include "amdgpu_res_cursor.h"
+#include "kfd_svm.h"
-/*
- * GPUVM
- * GPUVM is similar to the legacy gart on older asics, however
- * rather than there being a single global gart table
- * for the entire GPU, there are multiple VM page tables active
- * at any given time. The VM page tables can contain a mix
- * vram pages and system memory pages and system memory pages
+/**
+ * DOC: GPUVM
+ *
+ * GPUVM is the MMU functionality provided on the GPU.
+ * GPUVM is similar to the legacy GART on older asics, however
+ * rather than there being a single global GART table
+ * for the entire GPU, there can be multiple GPUVM page tables active
+ * at any given time. The GPUVM page tables can contain a mix
+ * VRAM pages and system pages (both memory and MMIO) and system pages
* can be mapped as snooped (cached system pages) or unsnooped
* (uncached system pages).
- * Each VM has an ID associated with it and there is a page table
- * associated with each VMID. When execting a command buffer,
- * the kernel tells the the ring what VMID to use for that command
+ *
+ * Each active GPUVM has an ID associated with it and there is a page table
+ * linked with each VMID. When executing a command buffer,
+ * the kernel tells the engine what VMID to use for that command
* buffer. VMIDs are allocated dynamically as commands are submitted.
* The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
* command buffers and a VMID is assigned.
- * Cayman/Trinity support up to 8 active VMs at any given time;
- * SI supports 16.
+ * The hardware supports up to 16 active GPUVMs at any given time.
+ *
+ * Each GPUVM is represented by a 1-2 or 1-5 level page table, depending
+ * on the ASIC family. GPUVM supports RWX attributes on each page as well
+ * as other features such as encryption and caching attributes.
+ *
+ * VMID 0 is special. It is the GPUVM used for the kernel driver. In
+ * addition to an aperture managed by a page table, VMID 0 also has
+ * several other apertures. There is an aperture for direct access to VRAM
+ * and there is a legacy AGP aperture which just forwards accesses directly
+ * to the matching system physical addresses (or IOVAs when an IOMMU is
+ * present). These apertures provide direct access to these memories without
+ * incurring the overhead of a page table. VMID 0 is used by the kernel
+ * driver for tasks like memory management.
+ *
+ * GPU clients (i.e., engines on the GPU) use GPUVM VMIDs to access memory.
+ * For user applications, each application can have their own unique GPUVM
+ * address space. The application manages the address space and the kernel
+ * driver manages the GPUVM page tables for each process. If a GPU client
+ * accesses an invalid page, it will generate a GPU page fault, similar to
+ * accessing an invalid page on a CPU.
*/
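For a rough picture of the layout described above: each page-table level consumes block_size bits of the GPU page frame number, the same shift arithmetic used by the removed amdgpu_vm_alloc_levels() further down. A minimal sketch, assuming fields named like those in adev->vm_manager (exact widths and level counts are ASIC specific):

	/* Illustrative only: index into the page table at a given level,
	 * for a GPU page frame number (virtual address / GPU page size).
	 */
	static unsigned int pt_index(uint64_t pfn, unsigned int level,
				     unsigned int block_size,
				     unsigned int num_level)
	{
		unsigned int shift = (num_level - level) * block_size;

		return (pfn >> shift) & ((1u << block_size) - 1);
	}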
#define START(node) ((node)->start)
@@ -61,623 +96,800 @@ INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
#undef START
#undef LAST
-/* Local structure. Encapsulate some VM table update parameters to reduce
- * the number of function parameters
+/**
+ * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
*/
-struct amdgpu_pte_update_params {
- /* amdgpu device we do this update for */
+struct amdgpu_prt_cb {
+
+ /**
+ * @adev: amdgpu device
+ */
struct amdgpu_device *adev;
- /* optional amdgpu_vm we do this update for */
- struct amdgpu_vm *vm;
- /* address where to copy page table entries from */
- uint64_t src;
- /* indirect buffer to fill with commands */
- struct amdgpu_ib *ib;
- /* Function which actually does the update */
- void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
- uint64_t addr, unsigned count, uint32_t incr,
- uint64_t flags);
- /* indicate update pt or its shadow */
- bool shadow;
+
+ /**
+ * @cb: callback
+ */
+ struct dma_fence_cb cb;
};
-/* Helper to disable partial resident texture feature from a fence callback */
-struct amdgpu_prt_cb {
- struct amdgpu_device *adev;
+/**
+ * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence
+ */
+struct amdgpu_vm_tlb_seq_struct {
+ /**
+ * @vm: pointer to the amdgpu_vm structure to set the fence sequence on
+ */
+ struct amdgpu_vm *vm;
+
+ /**
+ * @cb: callback
+ */
struct dma_fence_cb cb;
};
/**
- * amdgpu_vm_num_entries - return the number of entries in a PD/PT
+ * amdgpu_vm_assert_locked - check if VM is correctly locked
+ * @vm: the VM which should be tested
*
- * @adev: amdgpu_device pointer
+ * Asserts that the VM root PD is locked.
+ */
+static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
+{
+ dma_resv_assert_held(vm->root.bo->tbo.base.resv);
+}
+
+/**
+ * amdgpu_vm_bo_evicted - vm_bo is evicted
+ *
+ * @vm_bo: vm_bo which is evicted
*
- * Calculate the number of entries in a page directory or page table.
+ * State for PDs/PTs and per VM BOs which are not at the location they should
+ * be.
*/
-static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
- unsigned level)
+static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
{
- if (level == 0)
- /* For the root directory */
- return adev->vm_manager.max_pfn >>
- (adev->vm_manager.block_size *
- adev->vm_manager.num_level);
- else if (level == adev->vm_manager.num_level)
- /* For the page tables on the leaves */
- return AMDGPU_VM_PTE_COUNT(adev);
+ struct amdgpu_vm *vm = vm_bo->vm;
+ struct amdgpu_bo *bo = vm_bo->bo;
+
+ vm_bo->moved = true;
+ amdgpu_vm_assert_locked(vm);
+ spin_lock(&vm_bo->vm->status_lock);
+ if (bo->tbo.type == ttm_bo_type_kernel)
+ list_move(&vm_bo->vm_status, &vm->evicted);
else
- /* Everything in between */
- return 1 << adev->vm_manager.block_size;
+ list_move_tail(&vm_bo->vm_status, &vm->evicted);
+ spin_unlock(&vm_bo->vm->status_lock);
+}
+/**
+ * amdgpu_vm_bo_moved - vm_bo is moved
+ *
+ * @vm_bo: vm_bo which is moved
+ *
+ * State for per VM BOs which are moved, but that change is not yet reflected
+ * in the page tables.
+ */
+static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
+{
+ amdgpu_vm_assert_locked(vm_bo->vm);
+ spin_lock(&vm_bo->vm->status_lock);
+ list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
- * amdgpu_vm_bo_size - returns the size of the BOs in bytes
+ * amdgpu_vm_bo_idle - vm_bo is idle
*
- * @adev: amdgpu_device pointer
+ * @vm_bo: vm_bo which is now idle
*
- * Calculate the size of the BO for a page directory or page table in bytes.
+ * State for PDs/PTs and per VM BOs which have gone through the state machine
+ * and are now idle.
*/
-static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
+static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
- return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
+ amdgpu_vm_assert_locked(vm_bo->vm);
+ spin_lock(&vm_bo->vm->status_lock);
+ list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
+ spin_unlock(&vm_bo->vm->status_lock);
+ vm_bo->moved = false;
}
/**
- * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
+ * amdgpu_vm_bo_invalidated - vm_bo is invalidated
*
- * @vm: vm providing the BOs
- * @validated: head of validation list
- * @entry: entry to add
+ * @vm_bo: vm_bo which is now invalidated
*
- * Add the page directory to the list of BOs to
- * validate for command submission.
+ * State for normal BOs which are invalidated and that change is not yet reflected
+ * in the PTs.
*/
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
- struct list_head *validated,
- struct amdgpu_bo_list_entry *entry)
+static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
- entry->robj = vm->root.bo;
- entry->priority = 0;
- entry->tv.bo = &entry->robj->tbo;
- entry->tv.shared = true;
- entry->user_pages = NULL;
- list_add(&entry->tv.head, validated);
+ spin_lock(&vm_bo->vm->status_lock);
+ list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
+ spin_unlock(&vm_bo->vm->status_lock);
}
/**
- * amdgpu_vm_validate_layer - validate a single page table level
+ * amdgpu_vm_bo_evicted_user - vm_bo is evicted
*
- * @parent: parent page table level
- * @validate: callback to do the validation
- * @param: parameter for the validation callback
+ * @vm_bo: vm_bo which is evicted
*
- * Validate the page table BOs on command submission if neccessary.
+ * State for BOs used by user mode queues which are not at the location they
+ * should be.
*/
-static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
- int (*validate)(void *, struct amdgpu_bo *),
- void *param)
+static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
{
- unsigned i;
- int r;
+ vm_bo->moved = true;
+ spin_lock(&vm_bo->vm->status_lock);
+ list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
+ spin_unlock(&vm_bo->vm->status_lock);
+}
- if (!parent->entries)
- return 0;
+/**
+ * amdgpu_vm_bo_relocated - vm_bo is relocated
+ *
+ * @vm_bo: vm_bo which is relocated
+ *
+ * State for PDs/PTs which need to update their parent PD.
+ * For the root PD, just move to idle state.
+ */
+static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
+{
+ amdgpu_vm_assert_locked(vm_bo->vm);
+ if (vm_bo->bo->parent) {
+ spin_lock(&vm_bo->vm->status_lock);
+ list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
+ spin_unlock(&vm_bo->vm->status_lock);
+ } else {
+ amdgpu_vm_bo_idle(vm_bo);
+ }
+}
- for (i = 0; i <= parent->last_entry_used; ++i) {
- struct amdgpu_vm_pt *entry = &parent->entries[i];
+/**
+ * amdgpu_vm_bo_done - vm_bo is done
+ *
+ * @vm_bo: vm_bo which is now done
+ *
+ * State for normal BOs which are invalidated and that change has been updated
+ * in the PTs.
+ */
+static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
+{
+ amdgpu_vm_assert_locked(vm_bo->vm);
+ spin_lock(&vm_bo->vm->status_lock);
+ list_move(&vm_bo->vm_status, &vm_bo->vm->done);
+ spin_unlock(&vm_bo->vm->status_lock);
+}
- if (!entry->bo)
- continue;
+/**
+ * amdgpu_vm_bo_reset_state_machine - reset the vm_bo state machine
+ * @vm: the VM which state machine to reset
+ *
+ * Move all vm_bo objects in the VM into a state where they will be updated
+ * again during validation.
+ */
+static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
+{
+ struct amdgpu_vm_bo_base *vm_bo, *tmp;
- r = validate(param, entry->bo);
- if (r)
- return r;
+ amdgpu_vm_assert_locked(vm);
- /*
- * Recurse into the sub directory. This is harmless because we
- * have only a maximum of 5 layers.
- */
- r = amdgpu_vm_validate_level(entry, validate, param);
- if (r)
- return r;
+ spin_lock(&vm->status_lock);
+ list_splice_init(&vm->done, &vm->invalidated);
+ list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
+ vm_bo->moved = true;
+
+ list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
+ struct amdgpu_bo *bo = vm_bo->bo;
+
+ vm_bo->moved = true;
+ if (!bo || bo->tbo.type != ttm_bo_type_kernel)
+ list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
+ else if (bo->parent)
+ list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
}
-
- return r;
+ spin_unlock(&vm->status_lock);
}
/**
- * amdgpu_vm_validate_pt_bos - validate the page table BOs
- *
- * @adev: amdgpu device pointer
- * @vm: vm providing the BOs
- * @validate: callback to do the validation
- * @param: parameter for the validation callback
+ * amdgpu_vm_update_shared - helper to update shared memory stat
+ * @base: base structure for tracking BO usage in a VM
*
- * Validate the page table BOs on command submission if neccessary.
+ * Takes the vm status_lock and updates the shared memory stat. If the basic
+ * stat changed (e.g. buffer was moved), amdgpu_vm_update_stats needs to be called
+ * as well.
*/
-int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int (*validate)(void *p, struct amdgpu_bo *bo),
- void *param)
+static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
{
- uint64_t num_evictions;
+ struct amdgpu_vm *vm = base->vm;
+ struct amdgpu_bo *bo = base->bo;
+ uint64_t size = amdgpu_bo_size(bo);
+ uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
+ bool shared;
- /* We only need to validate the page tables
- * if they aren't already valid.
- */
- num_evictions = atomic64_read(&adev->num_evictions);
- if (num_evictions == vm->last_eviction_counter)
- return 0;
-
- return amdgpu_vm_validate_level(&vm->root, validate, param);
+ dma_resv_assert_held(bo->tbo.base.resv);
+ spin_lock(&vm->status_lock);
+ shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
+ if (base->shared != shared) {
+ base->shared = shared;
+ if (shared) {
+ vm->stats[bo_memtype].drm.shared += size;
+ vm->stats[bo_memtype].drm.private -= size;
+ } else {
+ vm->stats[bo_memtype].drm.shared -= size;
+ vm->stats[bo_memtype].drm.private += size;
+ }
+ }
+ spin_unlock(&vm->status_lock);
}
/**
- * amdgpu_vm_move_level_in_lru - move one level of PT BOs to the LRU tail
- *
- * @adev: amdgpu device instance
- * @vm: vm providing the BOs
+ * amdgpu_vm_bo_update_shared - callback when bo gets shared/unshared
+ * @bo: amdgpu buffer object
*
- * Move the PT BOs to the tail of the LRU.
+ * Update the per-VM stats for every VM this BO belongs to, if needed, from private to shared or
+ * vice versa.
*/
-static void amdgpu_vm_move_level_in_lru(struct amdgpu_vm_pt *parent)
+void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
{
- unsigned i;
+ struct amdgpu_vm_bo_base *base;
- if (!parent->entries)
- return;
+ for (base = bo->vm_bo; base; base = base->next)
+ amdgpu_vm_update_shared(base);
+}
- for (i = 0; i <= parent->last_entry_used; ++i) {
- struct amdgpu_vm_pt *entry = &parent->entries[i];
+/**
+ * amdgpu_vm_update_stats_locked - helper to update normal memory stat
+ * @base: base structure for tracking BO usage in a VM
+ * @res: the ttm_resource to use for the purpose of accounting, may or may not
+ * be bo->tbo.resource
+ * @sign: if we should add (+1) or subtract (-1) from the stat
+ *
+ * Caller needs to have the vm status_lock held. Useful for when multiple updates
+ * need to happen at the same time.
+ */
+static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
+ struct ttm_resource *res, int sign)
+{
+ struct amdgpu_vm *vm = base->vm;
+ struct amdgpu_bo *bo = base->bo;
+ int64_t size = sign * amdgpu_bo_size(bo);
+ uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo);
- if (!entry->bo)
- continue;
+ /* For drm-total- and drm-shared-, BOs are accounted by their preferred
+ * placement, see also amdgpu_bo_mem_stats_placement.
+ */
+ if (base->shared)
+ vm->stats[bo_memtype].drm.shared += size;
+ else
+ vm->stats[bo_memtype].drm.private += size;
- ttm_bo_move_to_lru_tail(&entry->bo->tbo);
- amdgpu_vm_move_level_in_lru(entry);
+ if (res && res->mem_type < __AMDGPU_PL_NUM) {
+ uint32_t res_memtype = res->mem_type;
+
+ vm->stats[res_memtype].drm.resident += size;
+ /* BOs only count as purgeable if they are resident,
+ * since otherwise there's nothing to purge.
+ */
+ if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
+ vm->stats[res_memtype].drm.purgeable += size;
+ if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
+ vm->stats[bo_memtype].evicted += size;
}
}
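These buckets feed the standard DRM fdinfo memory keys that the comment above refers to; for a VRAM-resident, non-shared, non-discardable BO the user-visible result would look roughly like this (illustrative values only):

	drm-total-vram:     16384 KiB
	drm-shared-vram:    0
	drm-resident-vram:  16384 KiB
	drm-purgeable-vram: 0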
/**
- * amdgpu_vm_move_pt_bos_in_lru - move the PT BOs to the LRU tail
- *
- * @adev: amdgpu device instance
- * @vm: vm providing the BOs
+ * amdgpu_vm_update_stats - helper to update normal memory stat
+ * @base: base structure for tracking BO usage in a VM
+ * @res: the ttm_resource to use for the purpose of accounting, may or may not
+ * be bo->tbo.resource
+ * @sign: if we should add (+1) or subtract (-1) from the stat
*
- * Move the PT BOs to the tail of the LRU.
+ * Updates the basic memory stat when bo is added/deleted/moved.
*/
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
+ struct ttm_resource *res, int sign)
{
- struct ttm_bo_global *glob = adev->mman.bdev.glob;
+ struct amdgpu_vm *vm = base->vm;
- spin_lock(&glob->lru_lock);
- amdgpu_vm_move_level_in_lru(&vm->root);
- spin_unlock(&glob->lru_lock);
+ spin_lock(&vm->status_lock);
+ amdgpu_vm_update_stats_locked(base, res, sign);
+ spin_unlock(&vm->status_lock);
}
- /**
- * amdgpu_vm_alloc_levels - allocate the PD/PT levels
+/**
+ * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
*
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- * @saddr: start of the address range
- * @eaddr: end of the address range
+ * @base: base structure for tracking BO usage in a VM
+ * @vm: vm to which bo is to be added
+ * @bo: amdgpu buffer object
+ *
+ * Initialize a bo_va_base structure and add it to the appropriate lists
*
- * Make sure the page directories and page tables are allocated
*/
-static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent,
- uint64_t saddr, uint64_t eaddr,
- unsigned level)
+void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+ struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
- unsigned shift = (adev->vm_manager.num_level - level) *
- adev->vm_manager.block_size;
- unsigned pt_idx, from, to;
- int r;
+ base->vm = vm;
+ base->bo = bo;
+ base->next = NULL;
+ INIT_LIST_HEAD(&base->vm_status);
- if (!parent->entries) {
- unsigned num_entries = amdgpu_vm_num_entries(adev, level);
+ if (!bo)
+ return;
+ base->next = bo->vm_bo;
+ bo->vm_bo = base;
- parent->entries = drm_calloc_large(num_entries,
- sizeof(struct amdgpu_vm_pt));
- if (!parent->entries)
- return -ENOMEM;
- memset(parent->entries, 0 , sizeof(struct amdgpu_vm_pt));
- }
+ spin_lock(&vm->status_lock);
+ base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
+ amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
+ spin_unlock(&vm->status_lock);
- from = saddr >> shift;
- to = eaddr >> shift;
- if (from >= amdgpu_vm_num_entries(adev, level) ||
- to >= amdgpu_vm_num_entries(adev, level))
- return -EINVAL;
+ if (!amdgpu_vm_is_bo_always_valid(vm, bo))
+ return;
- if (to > parent->last_entry_used)
- parent->last_entry_used = to;
-
- ++level;
- saddr = saddr & ((1 << shift) - 1);
- eaddr = eaddr & ((1 << shift) - 1);
-
- /* walk over the address space and allocate the page tables */
- for (pt_idx = from; pt_idx <= to; ++pt_idx) {
- struct reservation_object *resv = vm->root.bo->tbo.resv;
- struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
- struct amdgpu_bo *pt;
-
- if (!entry->bo) {
- r = amdgpu_bo_create(adev,
- amdgpu_vm_bo_size(adev, level),
- AMDGPU_GPU_PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_VRAM_CLEARED,
- NULL, resv, &pt);
- if (r)
- return r;
+ dma_resv_assert_held(vm->root.bo->tbo.base.resv);
- /* Keep a reference to the root directory to avoid
- * freeing them up in the wrong order.
- */
- pt->parent = amdgpu_bo_ref(vm->root.bo);
+ ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
+ if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
+ amdgpu_vm_bo_relocated(base);
+ else
+ amdgpu_vm_bo_idle(base);
- entry->bo = pt;
- entry->addr = 0;
- }
+ if (bo->preferred_domains &
+ amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
+ return;
- if (level < adev->vm_manager.num_level) {
- uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
- uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
- ((1 << shift) - 1);
- r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
- sub_eaddr, level);
- if (r)
- return r;
+ /*
+ * We checked all the prerequisites, but it looks like this per VM BO
+ * is currently evicted. Add the BO to the evicted list to make sure it
+ * is validated on next VM use to avoid faults.
+ */
+ amdgpu_vm_bo_evicted(base);
+}
+
+/**
+ * amdgpu_vm_lock_pd - lock PD in drm_exec
+ *
+ * @vm: vm providing the BOs
+ * @exec: drm execution context
+ * @num_fences: number of extra fences to reserve
+ *
+ * Lock the VM root PD in the DRM execution context.
+ */
+int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ /* We need at least two fences for the VM PD/PT updates */
+ return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
+ 2 + num_fences);
+}
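Callers are expected to take this lock from inside a drm_exec loop and retry on contention; a minimal sketch, assuming an already initialised struct drm_exec named exec:

	drm_exec_until_all_locked(&exec) {
		r = amdgpu_vm_lock_pd(vm, &exec, 2);
		drm_exec_retry_on_contention(&exec);
		if (unlikely(r))
			goto error;
	}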
+
+/**
+ * amdgpu_vm_lock_done_list - lock all BOs on the done list
+ * @vm: vm providing the BOs
+ * @exec: drm execution context
+ * @num_fences: number of extra fences to reserve
+ *
+ * Lock the BOs on the done list in the DRM execution context.
+ */
+int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences)
+{
+ struct list_head *prev = &vm->done;
+ struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo *bo;
+ int ret;
+
+ /* We can only trust prev->next while holding the lock */
+ spin_lock(&vm->status_lock);
+ while (!list_is_head(prev->next, &vm->done)) {
+ bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_va->base.bo;
+ if (bo) {
+ ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
+ if (unlikely(ret))
+ return ret;
}
+ spin_lock(&vm->status_lock);
+ prev = prev->next;
}
+ spin_unlock(&vm->status_lock);
return 0;
}
/**
- * amdgpu_vm_alloc_pts - Allocate page tables.
+ * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
*
- * @adev: amdgpu_device pointer
- * @vm: VM to allocate page tables for
- * @saddr: Start address which needs to be allocated
- * @size: Size from start address we need.
+ * @adev: amdgpu device pointer
+ * @vm: vm providing the BOs
*
- * Make sure the page tables are allocated.
+ * Move all BOs to the end of LRU and remember their positions to put them
+ * together.
*/
-int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- uint64_t saddr, uint64_t size)
+void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
{
- uint64_t last_pfn;
- uint64_t eaddr;
+ spin_lock(&adev->mman.bdev.lru_lock);
+ ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
+ spin_unlock(&adev->mman.bdev.lru_lock);
+}
- /* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
- return -EINVAL;
+/* Create scheduler entities for page table updates */
+static int amdgpu_vm_init_entities(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+ int r;
- eaddr = saddr + size - 1;
- last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
- if (last_pfn >= adev->vm_manager.max_pfn) {
- dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
- last_pfn, adev->vm_manager.max_pfn);
- return -EINVAL;
- }
+ r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
+ if (r)
+ goto error;
- saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
+ adev->vm_manager.vm_pte_scheds,
+ adev->vm_manager.vm_pte_num_scheds, NULL);
- return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
+error:
+ drm_sched_entity_destroy(&vm->immediate);
+ return r;
+}
+
+/* Destroy the entities for page table updates again */
+static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
+{
+ drm_sched_entity_destroy(&vm->immediate);
+ drm_sched_entity_destroy(&vm->delayed);
}
/**
- * amdgpu_vm_had_gpu_reset - check if reset occured since last use
- *
- * @adev: amdgpu_device pointer
- * @id: VMID structure
+ * amdgpu_vm_generation - return the page table re-generation counter
+ * @adev: the amdgpu_device
+ * @vm: optional VM to check, might be NULL
*
- * Check if GPU reset occured since last use of the VMID.
+ * Returns a page table re-generation token to allow checking if submissions
+ * are still valid to use this VM. The VM parameter might be NULL in which case
+ * just the VRAM lost counter will be used.
*/
-static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
- struct amdgpu_vm_id *id)
+uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- return id->current_gpu_reset_count !=
- atomic_read(&adev->gpu_reset_counter);
+ uint64_t result = (u64)atomic_read(&adev->vram_lost_counter) << 32;
+
+ if (!vm)
+ return result;
+
+ result += lower_32_bits(vm->generation);
+ /* Add one if the page tables will be re-generated on next CS */
+ if (drm_sched_entity_error(&vm->delayed))
+ ++result;
+
+ return result;
}
/**
- * amdgpu_vm_grab_id - allocate the next free VMID
+ * amdgpu_vm_validate - validate evicted BOs tracked in the VM
+ *
+ * @adev: amdgpu device pointer
+ * @vm: vm providing the BOs
+ * @ticket: optional reservation ticket used to reserve the VM
+ * @validate: callback to do the validation
+ * @param: parameter for the validation callback
*
- * @vm: vm to allocate id for
- * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
+ * Validate the page table BOs and per-VM BOs on command submission if
+ * necessary. If a ticket is given, also try to validate evicted user queue
+ * BOs. They must already be reserved with the given ticket.
*
- * Allocate an id for the vm, adding fences to the sync obj as necessary.
+ * Returns:
+ * Validation result.
*/
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct dma_fence *fence,
- struct amdgpu_job *job)
+int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct ww_acquire_ctx *ticket,
+ int (*validate)(void *p, struct amdgpu_bo *bo),
+ void *param)
{
- struct amdgpu_device *adev = ring->adev;
- unsigned vmhub = ring->funcs->vmhub;
- struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- uint64_t fence_context = adev->fence_context + ring->idx;
- struct dma_fence *updates = sync->last_vm_update;
- struct amdgpu_vm_id *id, *idle;
- struct dma_fence **fences;
- unsigned i;
- int r = 0;
-
- fences = kmalloc_array(sizeof(void *), id_mgr->num_ids, GFP_KERNEL);
- if (!fences)
- return -ENOMEM;
+ uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
+ struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_bo *bo;
+ int r;
- mutex_lock(&id_mgr->lock);
+ if (vm->generation != new_vm_generation) {
+ vm->generation = new_vm_generation;
+ amdgpu_vm_bo_reset_state_machine(vm);
+ amdgpu_vm_fini_entities(vm);
+ r = amdgpu_vm_init_entities(adev, vm);
+ if (r)
+ return r;
+ }
- /* Check if we have an idle VMID */
- i = 0;
- list_for_each_entry(idle, &id_mgr->ids_lru, list) {
- fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
- if (!fences[i])
- break;
- ++i;
- }
-
- /* If we can't find a idle VMID to use, wait till one becomes available */
- if (&idle->list == &id_mgr->ids_lru) {
- u64 fence_context = adev->vm_manager.fence_context + ring->idx;
- unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
- struct dma_fence_array *array;
- unsigned j;
-
- for (j = 0; j < i; ++j)
- dma_fence_get(fences[j]);
-
- array = dma_fence_array_create(i, fences, fence_context,
- seqno, true);
- if (!array) {
- for (j = 0; j < i; ++j)
- dma_fence_put(fences[j]);
- kfree(fences);
- r = -ENOMEM;
- goto error;
- }
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->evicted)) {
+ bo_base = list_first_entry(&vm->evicted,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
+ bo = bo_base->bo;
- r = amdgpu_sync_fence(ring->adev, sync, &array->base);
- dma_fence_put(&array->base);
+ r = validate(param, bo);
if (r)
- goto error;
-
- mutex_unlock(&id_mgr->lock);
- return 0;
+ return r;
+ if (bo->tbo.type != ttm_bo_type_kernel) {
+ amdgpu_vm_bo_moved(bo_base);
+ } else {
+ vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
+ amdgpu_vm_bo_relocated(bo_base);
+ }
+ spin_lock(&vm->status_lock);
}
- kfree(fences);
-
- job->vm_needs_flush = false;
- /* Check if we can use a VMID already assigned to this VM */
- list_for_each_entry_reverse(id, &id_mgr->ids_lru, list) {
- struct dma_fence *flushed;
- bool needs_flush = false;
-
- /* Check all the prerequisites to using this VMID */
- if (amdgpu_vm_had_gpu_reset(adev, id))
- continue;
-
- if (atomic64_read(&id->owner) != vm->client_id)
- continue;
+ while (ticket && !list_empty(&vm->evicted_user)) {
+ bo_base = list_first_entry(&vm->evicted_user,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
- if (job->vm_pd_addr != id->pd_gpu_addr)
- continue;
+ bo = bo_base->bo;
+ dma_resv_assert_held(bo->tbo.base.resv);
- if (!id->last_flush ||
- (id->last_flush->context != fence_context &&
- !dma_fence_is_signaled(id->last_flush)))
- needs_flush = true;
+ r = validate(param, bo);
+ if (r)
+ return r;
- flushed = id->flushed_updates;
- if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
- needs_flush = true;
+ amdgpu_vm_bo_invalidated(bo_base);
- /* Concurrent flushes are only possible starting with Vega10 */
- if (adev->asic_type < CHIP_VEGA10 && needs_flush)
- continue;
+ spin_lock(&vm->status_lock);
+ }
+ spin_unlock(&vm->status_lock);
- /* Good we can use this VMID. Remember this submission as
- * user of the VMID.
- */
- r = amdgpu_sync_fence(ring->adev, &id->active, fence);
- if (r)
- goto error;
+ amdgpu_vm_eviction_lock(vm);
+ vm->evicting = false;
+ amdgpu_vm_eviction_unlock(vm);
- if (updates && (!flushed || dma_fence_is_later(updates, flushed))) {
- dma_fence_put(id->flushed_updates);
- id->flushed_updates = dma_fence_get(updates);
- }
+ return 0;
+}
- if (needs_flush)
- goto needs_flush;
- else
- goto no_flush_needed;
+/**
+ * amdgpu_vm_ready - check VM is ready for updates
+ *
+ * @vm: VM to check
+ *
+ * Check if all VM PDs/PTs are ready for updates
+ *
+ * Returns:
+ * True if VM is not evicting and all VM entities are not stopped
+ */
+bool amdgpu_vm_ready(struct amdgpu_vm *vm)
+{
+ bool ret;
- };
+ amdgpu_vm_assert_locked(vm);
- /* Still no ID to use? Then use the idle one found earlier */
- id = idle;
+ amdgpu_vm_eviction_lock(vm);
+ ret = !vm->evicting;
+ amdgpu_vm_eviction_unlock(vm);
- /* Remember this submission as user of the VMID */
- r = amdgpu_sync_fence(ring->adev, &id->active, fence);
- if (r)
- goto error;
+ spin_lock(&vm->status_lock);
+ ret &= list_empty(&vm->evicted);
+ spin_unlock(&vm->status_lock);
- id->pd_gpu_addr = job->vm_pd_addr;
- dma_fence_put(id->flushed_updates);
- id->flushed_updates = dma_fence_get(updates);
- id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
- atomic64_set(&id->owner, vm->client_id);
+ spin_lock(&vm->immediate.lock);
+ ret &= !vm->immediate.stopped;
+ spin_unlock(&vm->immediate.lock);
-needs_flush:
- job->vm_needs_flush = true;
- dma_fence_put(id->last_flush);
- id->last_flush = NULL;
+ spin_lock(&vm->delayed.lock);
+ ret &= !vm->delayed.stopped;
+ spin_unlock(&vm->delayed.lock);
-no_flush_needed:
- list_move_tail(&id->list, &id_mgr->ids_lru);
+ return ret;
+}
- job->vm_id = id - id_mgr->ids;
- trace_amdgpu_vm_grab_id(vm, ring, job);
+/**
+ * amdgpu_vm_check_compute_bug - check whether asic has compute vm bug
+ *
+ * @adev: amdgpu_device pointer
+ */
+void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
+{
+ const struct amdgpu_ip_block *ip_block;
+ bool has_compute_vm_bug;
+ struct amdgpu_ring *ring;
+ int i;
+
+ has_compute_vm_bug = false;
+
+ ip_block = amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+ if (ip_block) {
+ /* Compute has a VM bug for GFX version < 7.
+ * Compute has a VM bug for GFX 8 MEC firmware version < 673. */
+ if (ip_block->version->major <= 7)
+ has_compute_vm_bug = true;
+ else if (ip_block->version->major == 8)
+ if (adev->gfx.mec_fw_version < 673)
+ has_compute_vm_bug = true;
+ }
-error:
- mutex_unlock(&id_mgr->lock);
- return r;
+ for (i = 0; i < adev->num_rings; i++) {
+ ring = adev->rings[i];
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ /* only compute rings */
+ ring->has_compute_vm_bug = has_compute_vm_bug;
+ else
+ ring->has_compute_vm_bug = false;
+ }
}
-static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
+/**
+ * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
+ *
+ * @ring: ring on which the job will be submitted
+ * @job: job to submit
+ *
+ * Returns:
+ * True if sync is needed.
+ */
+bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
+ struct amdgpu_job *job)
{
struct amdgpu_device *adev = ring->adev;
- const struct amdgpu_ip_block *ip_block;
+ unsigned vmhub = ring->vm_hub;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
- /* only compute rings */
+ if (job->vmid == 0)
return false;
- ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
- if (!ip_block)
- return false;
-
- if (ip_block->version->major <= 7) {
- /* gfx7 has no workaround */
+ if (job->vm_needs_flush || ring->has_compute_vm_bug)
return true;
- } else if (ip_block->version->major == 8) {
- if (adev->gfx.mec_fw_version >= 673)
- /* gfx8 is fixed in MEC firmware 673 */
- return false;
- else
- return true;
- }
- return false;
-}
-static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
-{
- u64 addr = mc_addr;
+ if (ring->funcs->emit_gds_switch && job->gds_switch_needed)
+ return true;
- if (adev->gart.gart_funcs->adjust_mc_addr)
- addr = adev->gart.gart_funcs->adjust_mc_addr(adev, addr);
+ if (amdgpu_vmid_had_gpu_reset(adev, &id_mgr->ids[job->vmid]))
+ return true;
- return addr;
+ return false;
}
/**
* amdgpu_vm_flush - hardware flush the vm
*
* @ring: ring to use for flush
- * @vm_id: vmid number to use
- * @pd_addr: address of the page directory
+ * @job: related job
+ * @need_pipe_sync: is pipe sync needed
*
* Emit a VM flush when it is necessary.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
+ bool need_pipe_sync)
{
struct amdgpu_device *adev = ring->adev;
- unsigned vmhub = ring->funcs->vmhub;
- struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct amdgpu_vm_id *id = &id_mgr->ids[job->vm_id];
- bool gds_switch_needed = ring->funcs->emit_gds_switch && (
- id->gds_base != job->gds_base ||
- id->gds_size != job->gds_size ||
- id->gws_base != job->gws_base ||
- id->gws_size != job->gws_size ||
- id->oa_base != job->oa_base ||
- id->oa_size != job->oa_size);
- bool vm_flush_needed = job->vm_needs_flush ||
- amdgpu_vm_ring_has_compute_vm_bug(ring);
- unsigned patch_offset = 0;
+ struct amdgpu_isolation *isolation = &adev->isolation[ring->xcp_id];
+ unsigned vmhub = ring->vm_hub;
+ struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
+ struct amdgpu_vmid *id = &id_mgr->ids[job->vmid];
+ bool spm_update_needed = job->spm_update_needed;
+ bool gds_switch_needed = ring->funcs->emit_gds_switch &&
+ job->gds_switch_needed;
+ bool vm_flush_needed = job->vm_needs_flush;
+ bool cleaner_shader_needed = false;
+ bool pasid_mapping_needed = false;
+ struct dma_fence *fence = NULL;
+ struct amdgpu_fence *af;
+ unsigned int patch;
int r;
- if (amdgpu_vm_had_gpu_reset(adev, id)) {
+ if (amdgpu_vmid_had_gpu_reset(adev, id)) {
gds_switch_needed = true;
vm_flush_needed = true;
+ pasid_mapping_needed = true;
+ spm_update_needed = true;
}
- if (!vm_flush_needed && !gds_switch_needed)
+ mutex_lock(&id_mgr->lock);
+ if (id->pasid != job->pasid || !id->pasid_mapping ||
+ !dma_fence_is_signaled(id->pasid_mapping))
+ pasid_mapping_needed = true;
+ mutex_unlock(&id_mgr->lock);
+
+ gds_switch_needed &= !!ring->funcs->emit_gds_switch;
+ vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
+ job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
+ pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
+ ring->funcs->emit_wreg;
+
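+ /* The cleaner shader only runs for the job picked as the isolation spearhead */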
+ cleaner_shader_needed = job->run_cleaner_shader &&
+ adev->gfx.enable_cleaner_shader &&
+ ring->funcs->emit_cleaner_shader && job->base.s_fence &&
+ &job->base.s_fence->scheduled == isolation->spearhead;
+
+ if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync &&
+ !cleaner_shader_needed)
return 0;
+ amdgpu_ring_ib_begin(ring);
if (ring->funcs->init_cond_exec)
- patch_offset = amdgpu_ring_init_cond_exec(ring);
+ patch = amdgpu_ring_init_cond_exec(ring,
+ ring->cond_exe_gpu_addr);
- if (ring->funcs->emit_pipeline_sync && !job->need_pipeline_sync)
+ if (need_pipe_sync)
amdgpu_ring_emit_pipeline_sync(ring);
- if (ring->funcs->emit_vm_flush && vm_flush_needed) {
- u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
- struct dma_fence *fence;
+ if (cleaner_shader_needed)
+ ring->funcs->emit_cleaner_shader(ring);
+
+ if (vm_flush_needed) {
+ trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
+ amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
+ }
+
+ if (pasid_mapping_needed)
+ amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
+
+ if (spm_update_needed && adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, ring, job->vmid);
- trace_amdgpu_vm_flush(ring, job->vm_id, pd_addr);
- amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
+ if (ring->funcs->emit_gds_switch &&
+ gds_switch_needed) {
+ amdgpu_ring_emit_gds_switch(ring, job->vmid, job->gds_base,
+ job->gds_size, job->gws_base,
+ job->gws_size, job->oa_base,
+ job->oa_size);
+ }
- r = amdgpu_fence_emit(ring, &fence);
+ if (vm_flush_needed || pasid_mapping_needed || cleaner_shader_needed) {
+ r = amdgpu_fence_emit(ring, &fence, NULL, 0);
if (r)
return r;
+ /* this is part of the job's context */
+ af = container_of(fence, struct amdgpu_fence, base);
+ af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0;
+ }
+ if (vm_flush_needed) {
mutex_lock(&id_mgr->lock);
dma_fence_put(id->last_flush);
- id->last_flush = fence;
+ id->last_flush = dma_fence_get(fence);
+ id->current_gpu_reset_count =
+ atomic_read(&adev->gpu_reset_counter);
mutex_unlock(&id_mgr->lock);
}
- if (gds_switch_needed) {
- id->gds_base = job->gds_base;
- id->gds_size = job->gds_size;
- id->gws_base = job->gws_base;
- id->gws_size = job->gws_size;
- id->oa_base = job->oa_base;
- id->oa_size = job->oa_size;
- amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
- job->gds_size, job->gws_base,
- job->gws_size, job->oa_base,
- job->oa_size);
+ if (pasid_mapping_needed) {
+ mutex_lock(&id_mgr->lock);
+ id->pasid = job->pasid;
+ dma_fence_put(id->pasid_mapping);
+ id->pasid_mapping = dma_fence_get(fence);
+ mutex_unlock(&id_mgr->lock);
}
- if (ring->funcs->patch_cond_exec)
- amdgpu_ring_patch_cond_exec(ring, patch_offset);
+ /*
+ * Make sure that all other submissions wait for the cleaner shader to
+ * finish before we push them to the HW.
+ */
+ if (cleaner_shader_needed) {
+ trace_amdgpu_cleaner_shader(ring, fence);
+ mutex_lock(&adev->enforce_isolation_mutex);
+ dma_fence_put(isolation->spearhead);
+ isolation->spearhead = dma_fence_get(fence);
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ }
+ dma_fence_put(fence);
+
+ amdgpu_ring_patch_cond_exec(ring, patch);
/* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
if (ring->funcs->emit_switch_buffer) {
amdgpu_ring_emit_switch_buffer(ring);
amdgpu_ring_emit_switch_buffer(ring);
}
- return 0;
-}
-
-/**
- * amdgpu_vm_reset_id - reset VMID to zero
- *
- * @adev: amdgpu device structure
- * @vm_id: vmid number to use
- *
- * Reset saved GDW, GWS and OA to force switch on next flush.
- */
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
- unsigned vmid)
-{
- struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
- struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
- id->gds_base = 0;
- id->gds_size = 0;
- id->gws_base = 0;
- id->gws_size = 0;
- id->oa_base = 0;
- id->oa_size = 0;
+ amdgpu_ring_ib_end(ring);
+ return 0;
}
/**
@@ -691,73 +903,22 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
* Returns the found bo_va or NULL if none is found
*
* Object has to be reserved!
+ *
+ * Returns:
+ * Found bo_va or NULL.
*/
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo)
{
- struct amdgpu_bo_va *bo_va;
-
- list_for_each_entry(bo_va, &bo->va, bo_list) {
- if (bo_va->vm == vm) {
- return bo_va;
- }
- }
- return NULL;
-}
-
-/**
- * amdgpu_vm_do_set_ptes - helper to call the right asic function
- *
- * @params: see amdgpu_pte_update_params definition
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: hw access flags
- *
- * Traces the parameters and calls the right asic functions
- * to setup the page table using the DMA.
- */
-static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint64_t flags)
-{
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+ struct amdgpu_vm_bo_base *base;
- if (count < 3) {
- amdgpu_vm_write_pte(params->adev, params->ib, pe,
- addr | flags, count, incr);
+ for (base = bo->vm_bo; base; base = base->next) {
+ if (base->vm != vm)
+ continue;
- } else {
- amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
- count, incr, flags);
+ return container_of(base, struct amdgpu_bo_va, base);
}
-}
-
-/**
- * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
- *
- * @params: see amdgpu_pte_update_params definition
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: hw access flags
- *
- * Traces the parameters and calls the DMA function to copy the PTEs.
- */
-static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint64_t flags)
-{
- uint64_t src = (params->src + (addr >> 12) * 8);
-
-
- trace_amdgpu_vm_copy_ptes(pe, src, count);
-
- amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
+ return NULL;
}
/**
@@ -767,9 +928,12 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
* @addr: the unmapped addr
*
* Look up the physical address of the page that the pte resolves
- * to and return the pointer for the page table entry.
+ * to.
+ *
+ * Returns:
+ * The pointer for the page table entry.
*/
-static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
uint64_t result;
@@ -784,619 +948,297 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
return result;
}
-/*
- * amdgpu_vm_update_level - update a single level in the hierarchy
+/**
+ * amdgpu_vm_update_pdes - make sure that all directories are valid
*
* @adev: amdgpu_device pointer
* @vm: requested vm
- * @parent: parent directory
+ * @immediate: submit immediately to the paging queue
+ *
+ * Makes sure all directories are up to date.
*
- * Makes sure all entries in @parent are up to date.
- * Returns 0 for success, error for failure.
+ * Returns:
+ * 0 for success, error for failure.
*/
-static int amdgpu_vm_update_level(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent,
- unsigned level)
+int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool immediate)
{
- struct amdgpu_bo *shadow;
- struct amdgpu_ring *ring;
- uint64_t pd_addr, shadow_addr;
- uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
- uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
- unsigned count = 0, pt_idx, ndw;
- struct amdgpu_job *job;
- struct amdgpu_pte_update_params params;
- struct dma_fence *fence = NULL;
-
- int r;
+ struct amdgpu_vm_update_params params;
+ struct amdgpu_vm_bo_base *entry;
+ bool flush_tlb_needed = false;
+ LIST_HEAD(relocated);
+ int r, idx;
- if (!parent->entries)
- return 0;
- ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
-
- /* padding, etc. */
- ndw = 64;
+ amdgpu_vm_assert_locked(vm);
- /* assume the worst case */
- ndw += parent->last_entry_used * 6;
-
- pd_addr = amdgpu_bo_gpu_offset(parent->bo);
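+ /* Move the relocated entries onto a local list so they can be
+ * processed without holding the status lock.
+ */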
+ spin_lock(&vm->status_lock);
+ list_splice_init(&vm->relocated, &relocated);
+ spin_unlock(&vm->status_lock);
- shadow = parent->bo->shadow;
- if (shadow) {
- r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
- if (r)
- return r;
- shadow_addr = amdgpu_bo_gpu_offset(shadow);
- ndw *= 2;
- } else {
- shadow_addr = 0;
- }
+ if (list_empty(&relocated))
+ return 0;
- r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
- if (r)
- return r;
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return -ENODEV;
memset(&params, 0, sizeof(params));
params.adev = adev;
- params.ib = &job->ibs[0];
-
- /* walk over the address space and update the directory */
- for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
- struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
- uint64_t pde, pt;
-
- if (bo == NULL)
- continue;
-
- if (bo->shadow) {
- struct amdgpu_bo *pt_shadow = bo->shadow;
-
- r = amdgpu_ttm_bind(&pt_shadow->tbo,
- &pt_shadow->tbo.mem);
- if (r)
- return r;
- }
-
- pt = amdgpu_bo_gpu_offset(bo);
- if (parent->entries[pt_idx].addr == pt)
- continue;
-
- parent->entries[pt_idx].addr = pt;
-
- pde = pd_addr + pt_idx * 8;
- if (((last_pde + 8 * count) != pde) ||
- ((last_pt + incr * count) != pt) ||
- (count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
-
- if (count) {
- uint64_t pt_addr =
- amdgpu_vm_adjust_mc_addr(adev, last_pt);
-
- if (shadow)
- amdgpu_vm_do_set_ptes(&params,
- last_shadow,
- pt_addr, count,
- incr,
- AMDGPU_PTE_VALID);
-
- amdgpu_vm_do_set_ptes(&params, last_pde,
- pt_addr, count, incr,
- AMDGPU_PTE_VALID);
- }
-
- count = 1;
- last_pde = pde;
- last_shadow = shadow_addr + pt_idx * 8;
- last_pt = pt;
- } else {
- ++count;
- }
- }
-
- if (count) {
- uint64_t pt_addr = amdgpu_vm_adjust_mc_addr(adev, last_pt);
+ params.vm = vm;
+ params.immediate = immediate;
- if (vm->root.bo->shadow)
- amdgpu_vm_do_set_ptes(&params, last_shadow, pt_addr,
- count, incr, AMDGPU_PTE_VALID);
+ r = vm->update_funcs->prepare(&params, NULL,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES);
+ if (r)
+ goto error;
- amdgpu_vm_do_set_ptes(&params, last_pde, pt_addr,
- count, incr, AMDGPU_PTE_VALID);
- }
+ list_for_each_entry(entry, &relocated, vm_status) {
+ /* vm_flush_needed after updating moved PDEs */
+ flush_tlb_needed |= entry->moved;
- if (params.ib->length_dw == 0) {
- amdgpu_job_free(job);
- } else {
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
- AMDGPU_FENCE_OWNER_VM);
- if (shadow)
- amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
- AMDGPU_FENCE_OWNER_VM);
-
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
+ r = amdgpu_vm_pde_update(&params, entry);
if (r)
- goto error_free;
-
- amdgpu_bo_fence(parent->bo, fence, true);
- dma_fence_put(vm->last_dir_update);
- vm->last_dir_update = dma_fence_get(fence);
- dma_fence_put(fence);
+ goto error;
}
- /*
- * Recurse into the subdirectories. This recursion is harmless because
- * we only have a maximum of 5 layers.
- */
- for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
- struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
- if (!entry->bo)
- continue;
+ r = vm->update_funcs->commit(&params, &vm->last_update);
+ if (r)
+ goto error;
- r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
- if (r)
- return r;
- }
+ if (flush_tlb_needed)
+ atomic64_inc(&vm->tlb_seq);
- return 0;
+ while (!list_empty(&relocated)) {
+ entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
+ vm_status);
+ amdgpu_vm_bo_idle(entry);
+ }
-error_free:
- amdgpu_job_free(job);
+error:
+ drm_dev_exit(idx);
return r;
}
-/*
- * amdgpu_vm_update_directories - make sure that all directories are valid
- *
- * @adev: amdgpu_device pointer
- * @vm: requested vm
- *
- * Makes sure all directories are up to date.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_vm_update_directories(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
-{
- return amdgpu_vm_update_level(adev, vm, &vm->root, 0);
-}
-
/**
- * amdgpu_vm_find_pt - find the page table for an address
- *
- * @p: see amdgpu_pte_update_params definition
- * @addr: virtual address in question
+ * amdgpu_vm_tlb_seq_cb - make sure to increment tlb sequence
+ * @fence: unused
+ * @cb: the callback structure
*
- * Find the page table BO for a virtual address, return NULL when none found.
+ * Increments the tlb sequence to make sure that future CS execute a VM flush.
*/
-static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
- uint64_t addr)
+static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,
+ struct dma_fence_cb *cb)
{
- struct amdgpu_vm_pt *entry = &p->vm->root;
- unsigned idx, level = p->adev->vm_manager.num_level;
+ struct amdgpu_vm_tlb_seq_struct *tlb_cb;
- while (entry->entries) {
- idx = addr >> (p->adev->vm_manager.block_size * level--);
- idx %= amdgpu_bo_size(entry->bo) / 8;
- entry = &entry->entries[idx];
- }
-
- if (level)
- return NULL;
-
- return entry->bo;
+ tlb_cb = container_of(cb, typeof(*tlb_cb), cb);
+ atomic64_inc(&tlb_cb->vm->tlb_seq);
+ kfree(tlb_cb);
}
/**
- * amdgpu_vm_update_ptes - make sure that page tables are valid
+ * amdgpu_vm_tlb_flush - prepare TLB flush
*
- * @params: see amdgpu_pte_update_params definition
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- * @dst: destination address to map to, the next dst inside the function
- * @flags: mapping flags
- *
- * Update the page tables in the range @start - @end.
- */
-static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
- uint64_t start, uint64_t end,
- uint64_t dst, uint64_t flags)
-{
- struct amdgpu_device *adev = params->adev;
- const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
-
- uint64_t cur_pe_start, cur_nptes, cur_dst;
- uint64_t addr; /* next GPU address to be updated */
- struct amdgpu_bo *pt;
- unsigned nptes; /* next number of ptes to be updated */
- uint64_t next_pe_start;
-
- /* initialize the variables */
- addr = start;
- pt = amdgpu_vm_get_pt(params, addr);
- if (!pt) {
- pr_err("PT not found, aborting update_ptes\n");
- return;
- }
-
- if (params->shadow) {
- if (!pt->shadow)
- return;
- pt = pt->shadow;
- }
- if ((addr & ~mask) == (end & ~mask))
- nptes = end - addr;
- else
- nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
-
- cur_pe_start = amdgpu_bo_gpu_offset(pt);
- cur_pe_start += (addr & mask) * 8;
- cur_nptes = nptes;
- cur_dst = dst;
-
- /* for next ptb*/
- addr += nptes;
- dst += nptes * AMDGPU_GPU_PAGE_SIZE;
-
- /* walk over the address space and update the page tables */
- while (addr < end) {
- pt = amdgpu_vm_get_pt(params, addr);
- if (!pt) {
- pr_err("PT not found, aborting update_ptes\n");
- return;
- }
-
- if (params->shadow) {
- if (!pt->shadow)
- return;
- pt = pt->shadow;
- }
-
- if ((addr & ~mask) == (end & ~mask))
- nptes = end - addr;
- else
- nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
-
- next_pe_start = amdgpu_bo_gpu_offset(pt);
- next_pe_start += (addr & mask) * 8;
-
- if ((cur_pe_start + 8 * cur_nptes) == next_pe_start &&
- ((cur_nptes + nptes) <= AMDGPU_VM_MAX_UPDATE_SIZE)) {
- /* The next ptb is consecutive to current ptb.
- * Don't call the update function now.
- * Will update two ptbs together in future.
- */
- cur_nptes += nptes;
- } else {
- params->func(params, cur_pe_start, cur_dst, cur_nptes,
- AMDGPU_GPU_PAGE_SIZE, flags);
-
- cur_pe_start = next_pe_start;
- cur_nptes = nptes;
- cur_dst = dst;
- }
-
- /* for next ptb*/
- addr += nptes;
- dst += nptes * AMDGPU_GPU_PAGE_SIZE;
- }
-
- params->func(params, cur_pe_start, cur_dst, cur_nptes,
- AMDGPU_GPU_PAGE_SIZE, flags);
-}
-
-/*
- * amdgpu_vm_frag_ptes - add fragment information to PTEs
+ * @params: parameters for update
+ * @fence: input fence to sync TLB flush with
+ * @tlb_cb: the callback structure
*
- * @params: see amdgpu_pte_update_params definition
- * @vm: requested vm
- * @start: first PTE to handle
- * @end: last PTE to handle
- * @dst: addr those PTEs should point to
- * @flags: hw mapping flags
+ * Increments the tlb sequence to make sure that future CS execute a VM flush.
*/
-static void amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
- uint64_t start, uint64_t end,
- uint64_t dst, uint64_t flags)
+static void
+amdgpu_vm_tlb_flush(struct amdgpu_vm_update_params *params,
+ struct dma_fence **fence,
+ struct amdgpu_vm_tlb_seq_struct *tlb_cb)
{
- /**
- * The MC L1 TLB supports variable sized pages, based on a fragment
- * field in the PTE. When this field is set to a non-zero value, page
- * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
- * flags are considered valid for all PTEs within the fragment range
- * and corresponding mappings are assumed to be physically contiguous.
- *
- * The L1 TLB can store a single PTE for the whole fragment,
- * significantly increasing the space available for translation
- * caching. This leads to large improvements in throughput when the
- * TLB is under pressure.
- *
- * The L2 TLB distributes small and large fragments into two
- * asymmetric partitions. The large fragment cache is significantly
- * larger. Thus, we try to use large fragments wherever possible.
- * Userspace can support this by aligning virtual base address and
- * allocation size to the fragment size.
- */
-
- /* SI and newer are optimized for 64KB */
- uint64_t frag_flags = AMDGPU_PTE_FRAG(AMDGPU_LOG2_PAGES_PER_FRAG);
- uint64_t frag_align = 1 << AMDGPU_LOG2_PAGES_PER_FRAG;
+ struct amdgpu_vm *vm = params->vm;
- uint64_t frag_start = ALIGN(start, frag_align);
- uint64_t frag_end = end & ~(frag_align - 1);
-
- /* system pages are non continuously */
- if (params->src || !(flags & AMDGPU_PTE_VALID) ||
- (frag_start >= frag_end)) {
-
- amdgpu_vm_update_ptes(params, start, end, dst, flags);
+ tlb_cb->vm = vm;
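+ /* Without a fence to wait on, bump the TLB sequence immediately */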
+ if (!fence || !*fence) {
+ amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
return;
}
- /* handle the 4K area at the beginning */
- if (start != frag_start) {
- amdgpu_vm_update_ptes(params, start, frag_start,
- dst, flags);
- dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
+ if (!dma_fence_add_callback(*fence, &tlb_cb->cb,
+ amdgpu_vm_tlb_seq_cb)) {
+ dma_fence_put(vm->last_tlb_flush);
+ vm->last_tlb_flush = dma_fence_get(*fence);
+ } else {
+ amdgpu_vm_tlb_seq_cb(NULL, &tlb_cb->cb);
}
- /* handle the area in the middle */
- amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
- flags | frag_flags);
+ /* Prepare a TLB flush fence to be attached to PTs */
+ if (!params->unlocked && vm->is_compute_context) {
+ amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
- /* handle the 4K area at the end */
- if (frag_end != end) {
- dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
- amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
+ /* Makes sure no PD/PT is freed before the flush */
+ dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
+ DMA_RESV_USAGE_BOOKKEEP);
}
}
/**
- * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
- *
- * @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
- * @src: address where to copy page table entries from
- * @pages_addr: DMA addresses to use for mapping
- * @vm: requested vm
+ * amdgpu_vm_update_range - update a range in the vm page table
+ *
+ * @adev: amdgpu_device pointer to use for commands
+ * @vm: the VM to update the range
+ * @immediate: immediate submission in a page fault
+ * @unlocked: unlocked invalidation during MM callback
+ * @flush_tlb: trigger tlb invalidation after update completed
+ * @allow_override: change MTYPE for local NUMA nodes
+ * @sync: fences we need to sync to
* @start: start of mapped range
* @last: last mapped entry
* @flags: flags for the entries
- * @addr: addr to set the area to
+ * @offset: offset into nodes and pages_addr
+ * @vram_base: base for vram mappings
+ * @res: ttm_resource to map
+ * @pages_addr: DMA addresses to use for mapping
* @fence: optional resulting fence
*
* Fill in the page table entries between @start and @last.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, negative error code for failure.
*/
-static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
- struct dma_fence *exclusive,
- uint64_t src,
- dma_addr_t *pages_addr,
- struct amdgpu_vm *vm,
- uint64_t start, uint64_t last,
- uint64_t flags, uint64_t addr,
- struct dma_fence **fence)
+int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ bool immediate, bool unlocked, bool flush_tlb,
+ bool allow_override, struct amdgpu_sync *sync,
+ uint64_t start, uint64_t last, uint64_t flags,
+ uint64_t offset, uint64_t vram_base,
+ struct ttm_resource *res, dma_addr_t *pages_addr,
+ struct dma_fence **fence)
{
- struct amdgpu_ring *ring;
- void *owner = AMDGPU_FENCE_OWNER_VM;
- unsigned nptes, ncmds, ndw;
- struct amdgpu_job *job;
- struct amdgpu_pte_update_params params;
- struct dma_fence *f = NULL;
- int r;
+ struct amdgpu_vm_tlb_seq_struct *tlb_cb;
+ struct amdgpu_vm_update_params params;
+ struct amdgpu_res_cursor cursor;
+ int r, idx;
- memset(&params, 0, sizeof(params));
- params.adev = adev;
- params.vm = vm;
- params.src = src;
-
- ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return -ENODEV;
- /* sync to everything on unmapping */
- if (!(flags & AMDGPU_PTE_VALID))
- owner = AMDGPU_FENCE_OWNER_UNDEFINED;
+ tlb_cb = kmalloc(sizeof(*tlb_cb), GFP_KERNEL);
+ if (!tlb_cb) {
+ drm_dev_exit(idx);
+ return -ENOMEM;
+ }
- nptes = last - start + 1;
+ /* On Vega20 with XGMI, PTEs get inadvertently cached in the L2
+ * texture cache, so do a heavy-weight TLB flush unconditionally.
+ */
+ flush_tlb |= adev->gmc.xgmi.num_physical_nodes &&
+ amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0);
/*
- * reserve space for one command every (1 << BLOCK_SIZE)
- * entries or 2k dwords (whatever is smaller)
+ * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB
*/
- ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;
-
- /* padding, etc. */
- ndw = 64;
+ flush_tlb |= amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 0);
- if (src) {
- /* only copy commands needed */
- ndw += ncmds * 7;
-
- params.func = amdgpu_vm_do_copy_ptes;
-
- } else if (pages_addr) {
- /* copy commands needed */
- ndw += ncmds * 7;
-
- /* and also PTEs */
- ndw += nptes * 2;
-
- params.func = amdgpu_vm_do_copy_ptes;
-
- } else {
- /* set page commands needed */
- ndw += ncmds * 10;
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.vm = vm;
+ params.immediate = immediate;
+ params.pages_addr = pages_addr;
+ params.unlocked = unlocked;
+ params.needs_flush = flush_tlb;
+ params.allow_override = allow_override;
+ INIT_LIST_HEAD(&params.tlb_flush_waitlist);
+
+ amdgpu_vm_eviction_lock(vm);
+ if (vm->evicting) {
+ r = -EBUSY;
+ goto error_free;
+ }
- /* two extra commands for begin/end of fragment */
- ndw += 2 * 10;
+ if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
+ struct dma_fence *tmp = dma_fence_get_stub();
- params.func = amdgpu_vm_do_set_ptes;
+ amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
+ swap(vm->last_unlocked, tmp);
+ dma_fence_put(tmp);
}
- r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+ r = vm->update_funcs->prepare(&params, sync,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE);
if (r)
- return r;
+ goto error_free;
- params.ib = &job->ibs[0];
+ amdgpu_res_first(pages_addr ? NULL : res, offset,
+ (last - start + 1) * AMDGPU_GPU_PAGE_SIZE, &cursor);
+ while (cursor.remaining) {
+ uint64_t tmp, num_entries, addr;
- if (!src && pages_addr) {
- uint64_t *pte;
- unsigned i;
+ num_entries = cursor.size >> AMDGPU_GPU_PAGE_SHIFT;
+ if (pages_addr) {
+ bool contiguous = true;
+
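+ /* Work out how long the run of (non-)contiguous system pages is;
+ * contiguous runs are mapped linearly, the rest goes through the
+ * DMA address array.
+ */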
+ if (num_entries > AMDGPU_GPU_PAGES_IN_CPU_PAGE) {
+ uint64_t pfn = cursor.start >> PAGE_SHIFT;
+ uint64_t count;
+
+ contiguous = pages_addr[pfn + 1] ==
+ pages_addr[pfn] + PAGE_SIZE;
+
+ tmp = num_entries /
+ AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ for (count = 2; count < tmp; ++count) {
+ uint64_t idx = pfn + count;
+
+ if (contiguous != (pages_addr[idx] ==
+ pages_addr[idx - 1] + PAGE_SIZE))
+ break;
+ }
+ if (!contiguous)
+ count--;
+ num_entries = count *
+ AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ }
- /* Put the PTEs at the end of the IB. */
- i = ndw - nptes * 2;
- pte= (uint64_t *)&(job->ibs->ptr[i]);
- params.src = job->ibs->gpu_addr + i * 4;
+ if (!contiguous) {
+ addr = cursor.start;
+ params.pages_addr = pages_addr;
+ } else {
+ addr = pages_addr[cursor.start >> PAGE_SHIFT];
+ params.pages_addr = NULL;
+ }
- for (i = 0; i < nptes; ++i) {
- pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
- AMDGPU_GPU_PAGE_SIZE);
- pte[i] |= flags;
+ } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT_FLAG(adev))) {
+ addr = vram_base + cursor.start;
+ } else {
+ addr = 0;
}
- addr = 0;
- }
- r = amdgpu_sync_fence(adev, &job->sync, exclusive);
- if (r)
- goto error_free;
+ tmp = start + num_entries;
+ r = amdgpu_vm_ptes_update(&params, start, tmp, addr, flags);
+ if (r)
+ goto error_free;
- r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
- owner);
- if (r)
- goto error_free;
+ amdgpu_res_next(&cursor, num_entries * AMDGPU_GPU_PAGE_SIZE);
+ start = tmp;
+ }
- r = reservation_object_reserve_shared(vm->root.bo->tbo.resv);
+ r = vm->update_funcs->commit(&params, fence);
if (r)
goto error_free;
- params.shadow = true;
- amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
- params.shadow = false;
- amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
-
- amdgpu_ring_pad_ib(ring, params.ib);
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &f);
- if (r)
- goto error_free;
+ if (params.needs_flush) {
+ amdgpu_vm_tlb_flush(&params, fence, tlb_cb);
+ tlb_cb = NULL;
+ }
- amdgpu_bo_fence(vm->root.bo, f, true);
- dma_fence_put(*fence);
- *fence = f;
- return 0;
+ amdgpu_vm_pt_free_list(adev, &params);
error_free:
- amdgpu_job_free(job);
+ kfree(tlb_cb);
+ amdgpu_vm_eviction_unlock(vm);
+ drm_dev_exit(idx);
return r;
}
-/**
- * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
- *
- * @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
- * @gtt_flags: flags as they are used for GTT
- * @pages_addr: DMA addresses to use for mapping
- * @vm: requested vm
- * @mapping: mapped range and flags to use for the update
- * @flags: HW flags for the mapping
- * @nodes: array of drm_mm_nodes with the MC addresses
- * @fence: optional resulting fence
- *
- * Split the mapping into smaller chunks so that each update fits
- * into a SDMA IB.
- * Returns 0 for success, -EINVAL for failure.
- */
-static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
- struct dma_fence *exclusive,
- uint64_t gtt_flags,
- dma_addr_t *pages_addr,
- struct amdgpu_vm *vm,
- struct amdgpu_bo_va_mapping *mapping,
- uint64_t flags,
- struct drm_mm_node *nodes,
- struct dma_fence **fence)
+void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
{
- uint64_t pfn, src = 0, start = mapping->start;
- int r;
-
- /* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
- * but in case of something, we filter the flags in first place
- */
- if (!(mapping->flags & AMDGPU_PTE_READABLE))
- flags &= ~AMDGPU_PTE_READABLE;
- if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
- flags &= ~AMDGPU_PTE_WRITEABLE;
-
- flags &= ~AMDGPU_PTE_EXECUTABLE;
- flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
-
- flags &= ~AMDGPU_PTE_MTYPE_MASK;
- flags |= (mapping->flags & AMDGPU_PTE_MTYPE_MASK);
-
- if ((mapping->flags & AMDGPU_PTE_PRT) &&
- (adev->asic_type >= CHIP_VEGA10)) {
- flags |= AMDGPU_PTE_PRT;
- flags &= ~AMDGPU_PTE_VALID;
- }
-
- trace_amdgpu_vm_bo_update(mapping);
-
- pfn = mapping->offset >> PAGE_SHIFT;
- if (nodes) {
- while (pfn >= nodes->size) {
- pfn -= nodes->size;
- ++nodes;
- }
- }
-
- do {
- uint64_t max_entries;
- uint64_t addr, last;
-
- if (nodes) {
- addr = nodes->start << PAGE_SHIFT;
- max_entries = (nodes->size - pfn) *
- (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
- } else {
- addr = 0;
- max_entries = S64_MAX;
- }
-
- if (pages_addr) {
- if (flags == gtt_flags)
- src = adev->gart.table_addr +
- (addr >> AMDGPU_GPU_PAGE_SHIFT) * 8;
- else
- max_entries = min(max_entries, 16ull * 1024ull);
- addr = 0;
- } else if (flags & AMDGPU_PTE_VALID) {
- addr += adev->vm_manager.vram_base_offset;
- }
- addr += pfn << PAGE_SHIFT;
-
- last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, exclusive,
- src, pages_addr, vm,
- start, last, flags, addr,
- fence);
- if (r)
- return r;
-
- pfn += last - start + 1;
- if (nodes && nodes->size == pfn) {
- pfn = 0;
- ++nodes;
- }
- start = last + 1;
-
- } while (unlikely(start != mapping->last + 1));
-
- return 0;
+ spin_lock(&vm->status_lock);
+ memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
+ spin_unlock(&vm->status_lock);
}
/**
@@ -1407,82 +1249,160 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
* @clear: if true clear the entries
*
* Fill in the page table entries for @bo_va.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
- struct amdgpu_bo_va *bo_va,
+int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
bool clear)
{
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+ struct amdgpu_vm *vm = bo_va->base.vm;
struct amdgpu_bo_va_mapping *mapping;
+ struct dma_fence **last_update;
dma_addr_t *pages_addr = NULL;
- uint64_t gtt_flags, flags;
- struct ttm_mem_reg *mem;
- struct drm_mm_node *nodes;
- struct dma_fence *exclusive;
+ struct ttm_resource *mem;
+ struct amdgpu_sync sync;
+ bool flush_tlb = clear;
+ uint64_t vram_base;
+ uint64_t flags;
+ bool uncached;
int r;
- if (clear || !bo_va->bo) {
+ amdgpu_sync_create(&sync);
+ if (clear) {
mem = NULL;
- nodes = NULL;
- exclusive = NULL;
+
+ /* Implicitly sync to command submissions in the same VM before
+ * unmapping.
+ */
+ r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
+ AMDGPU_SYNC_EQ_OWNER, vm);
+ if (r)
+ goto error_free;
+ if (bo) {
+ r = amdgpu_sync_kfd(&sync, bo->tbo.base.resv);
+ if (r)
+ goto error_free;
+ }
+ } else if (!bo) {
+ mem = NULL;
+
+ /* PRT map operations don't need to sync to anything. */
+
} else {
- struct ttm_dma_tt *ttm;
-
- mem = &bo_va->bo->tbo.mem;
- nodes = mem->mm_node;
- if (mem->mem_type == TTM_PL_TT) {
- ttm = container_of(bo_va->bo->tbo.ttm, struct
- ttm_dma_tt, ttm);
- pages_addr = ttm->dma_address;
+ struct drm_gem_object *obj = &bo->tbo.base;
+
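+ /* For XGMI accessible DMA-buf imports map the exporter's VRAM BO directly */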
+ if (drm_gem_is_imported(obj) && bo_va->is_xgmi) {
+ struct dma_buf *dma_buf = obj->import_attach->dmabuf;
+ struct drm_gem_object *gobj = dma_buf->priv;
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+
+ if (abo->tbo.resource &&
+ abo->tbo.resource->mem_type == TTM_PL_VRAM)
+ bo = gem_to_amdgpu_bo(gobj);
}
- exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
+ mem = bo->tbo.resource;
+ if (mem && (mem->mem_type == TTM_PL_TT ||
+ mem->mem_type == AMDGPU_PL_PREEMPT))
+ pages_addr = bo->tbo.ttm->dma_address;
+
+ /* Implicitly sync to moving fences before mapping anything */
+ r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
+ AMDGPU_SYNC_EXPLICIT, vm);
+ if (r)
+ goto error_free;
}
- if (bo_va->bo) {
- flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
- gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
- adev == amdgpu_ttm_adev(bo_va->bo->tbo.bdev)) ?
- flags : 0;
+ if (bo) {
+ struct amdgpu_device *bo_adev;
+
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+
+ if (amdgpu_bo_encrypted(bo))
+ flags |= AMDGPU_PTE_TMZ;
+
+ bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ vram_base = bo_adev->vm_manager.vram_base_offset;
+ uncached = (bo->flags & AMDGPU_GEM_CREATE_UNCACHED) != 0;
} else {
flags = 0x0;
- gtt_flags = ~0x0;
+ vram_base = 0;
+ uncached = false;
}
- spin_lock(&vm->status_lock);
- if (!list_empty(&bo_va->vm_status))
+ if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
+ last_update = &vm->last_update;
+ else
+ last_update = &bo_va->last_pt_update;
+
+ if (!clear && bo_va->base.moved) {
+ flush_tlb = true;
list_splice_init(&bo_va->valids, &bo_va->invalids);
- spin_unlock(&vm->status_lock);
+
+ } else if (bo_va->cleared != clear) {
+ list_splice_init(&bo_va->valids, &bo_va->invalids);
+ }
list_for_each_entry(mapping, &bo_va->invalids, list) {
- r = amdgpu_vm_bo_split_mapping(adev, exclusive,
- gtt_flags, pages_addr, vm,
- mapping, flags, nodes,
- &bo_va->last_pt_update);
+ uint64_t update_flags = flags;
+
+ /* Normally bo_va->flags only contains the READABLE and WRITEABLE
+ * bits, but filter the flags here just in case.
+ */
+ if (!(mapping->flags & AMDGPU_VM_PAGE_READABLE))
+ update_flags &= ~AMDGPU_PTE_READABLE;
+ if (!(mapping->flags & AMDGPU_VM_PAGE_WRITEABLE))
+ update_flags &= ~AMDGPU_PTE_WRITEABLE;
+
+ /* Apply ASIC specific mapping flags */
+ amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
+ &update_flags);
+
+ trace_amdgpu_vm_bo_update(mapping);
+
+ r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
+ !uncached, &sync, mapping->start,
+ mapping->last, update_flags,
+ mapping->offset, vram_base, mem,
+ pages_addr, last_update);
if (r)
- return r;
+ goto error_free;
+ }
+
+ /* If the BO is not in its preferred location add it back to
+ * the evicted list so that it gets validated again on the
+ * next command submission.
+ */
+ if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
+ if (bo->tbo.resource &&
+ !(bo->preferred_domains &
+ amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type)))
+ amdgpu_vm_bo_evicted(&bo_va->base);
+ else
+ amdgpu_vm_bo_idle(&bo_va->base);
+ } else {
+ amdgpu_vm_bo_done(&bo_va->base);
}
+ list_splice_init(&bo_va->invalids, &bo_va->valids);
+ bo_va->cleared = clear;
+ bo_va->base.moved = false;
+
if (trace_amdgpu_vm_bo_mapping_enabled()) {
list_for_each_entry(mapping, &bo_va->valids, list)
trace_amdgpu_vm_bo_mapping(mapping);
-
- list_for_each_entry(mapping, &bo_va->invalids, list)
- trace_amdgpu_vm_bo_mapping(mapping);
}
- spin_lock(&vm->status_lock);
- list_splice_init(&bo_va->invalids, &bo_va->valids);
- list_del_init(&bo_va->vm_status);
- if (clear)
- list_add(&bo_va->vm_status, &vm->cleared);
- spin_unlock(&vm->status_lock);
-
- return 0;
+error_free:
+ amdgpu_sync_free(&sync);
+ return r;
}
/**
* amdgpu_vm_update_prt_state - update the global PRT state
+ *
+ * @adev: amdgpu_device pointer
*/
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
@@ -1491,16 +1411,18 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
enable = !!atomic_read(&adev->vm_manager.num_prt_users);
- adev->gart.gart_funcs->set_prt(adev, enable);
+ adev->gmc.gmc_funcs->set_prt(adev, enable);
spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}
/**
* amdgpu_vm_prt_get - add a PRT user
+ *
+ * @adev: amdgpu_device pointer
*/
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
- if (!adev->gart.gart_funcs->set_prt)
+ if (!adev->gmc.gmc_funcs->set_prt)
return;
if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
@@ -1509,6 +1431,8 @@ static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
/**
* amdgpu_vm_prt_put - drop a PRT user
+ *
+ * @adev: amdgpu_device pointer
*/
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
@@ -1518,6 +1442,9 @@ static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
/**
* amdgpu_vm_prt_cb - callback for updating the PRT status
+ *
+ * @fence: fence for the callback
+ * @_cb: the callback structure
*/
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
@@ -1529,13 +1456,16 @@ static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
/**
* amdgpu_vm_add_prt_cb - add callback for updating the PRT status
+ *
+ * @adev: amdgpu_device pointer
+ * @fence: fence for the callback
*/
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
struct dma_fence *fence)
{
struct amdgpu_prt_cb *cb;
- if (!adev->gart.gart_funcs->set_prt)
+ if (!adev->gmc.gmc_funcs->set_prt)
return;
cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
@@ -1568,7 +1498,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *mapping,
struct dma_fence *fence)
{
- if (mapping->flags & AMDGPU_PTE_PRT)
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
amdgpu_vm_add_prt_cb(adev, fence);
kfree(mapping);
}
@@ -1583,32 +1513,15 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
*/
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- struct reservation_object *resv = vm->root.bo->tbo.resv;
- struct dma_fence *excl, **shared;
- unsigned i, shared_count;
- int r;
-
- r = reservation_object_get_fences_rcu(resv, &excl,
- &shared_count, &shared);
- if (r) {
- /* Not enough memory to grab the fence list, as last resort
- * block for all the fences to complete.
- */
- reservation_object_wait_timeout_rcu(resv, true, false,
- MAX_SCHEDULE_TIMEOUT);
- return;
- }
+ struct dma_resv *resv = vm->root.bo->tbo.base.resv;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
- /* Add a callback for each fence in the reservation object */
- amdgpu_vm_prt_get(adev);
- amdgpu_vm_add_prt_cb(adev, excl);
-
- for (i = 0; i < shared_count; ++i) {
+ dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
+ /* Add a callback for each fence in the reservation object */
amdgpu_vm_prt_get(adev);
- amdgpu_vm_add_prt_cb(adev, shared[i]);
+ amdgpu_vm_add_prt_cb(adev, fence);
}
-
- kfree(shared);
}
/**
@@ -1620,9 +1533,11 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
* or if an error occurred)
*
* Make sure all freed BOs are cleared in the PT.
- * Returns 0 for success.
- *
* PTs have to be reserved and mutex must be locked!
+ *
+ * Returns:
+ * 0 for success.
+ *
*/
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
@@ -1630,20 +1545,32 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
{
struct amdgpu_bo_va_mapping *mapping;
struct dma_fence *f = NULL;
+ struct amdgpu_sync sync;
int r;
+
+ /*
+ * Implicitly sync to command submissions in the same VM before
+ * unmapping.
+ */
+ amdgpu_sync_create(&sync);
+ r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
+ AMDGPU_SYNC_EQ_OWNER, vm);
+ if (r)
+ goto error_free;
+
while (!list_empty(&vm->freed)) {
mapping = list_first_entry(&vm->freed,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
- r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
- mapping->start, mapping->last,
- 0, 0, &f);
+ r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
+ &sync, mapping->start, mapping->last,
+ 0, 0, 0, NULL, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
dma_fence_put(f);
- return r;
+ goto error_free;
}
}
@@ -1654,44 +1581,133 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
dma_fence_put(f);
}
- return 0;
+error_free:
+ amdgpu_sync_free(&sync);
+ return r;
}
/**
- * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
+ * amdgpu_vm_handle_moved - handle moved BOs in the PT
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @ticket: optional reservation ticket used to reserve the VM
*
- * Make sure all invalidated BOs are cleared in the PT.
- * Returns 0 for success.
+ * Make sure all BOs which are moved are updated in the PTs.
*
- * PTs have to be reserved and mutex must be locked!
+ * Returns:
+ * 0 for success.
+ *
+ * PTs have to be reserved!
*/
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, struct amdgpu_sync *sync)
+int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct ww_acquire_ctx *ticket)
{
- struct amdgpu_bo_va *bo_va = NULL;
- int r = 0;
+ struct amdgpu_bo_va *bo_va;
+ struct dma_resv *resv;
+ bool clear, unlock;
+ int r;
spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->moved)) {
+ bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
+ base.vm_status);
+ spin_unlock(&vm->status_lock);
+
+ /* Per VM BOs never need to be cleared in the page tables */
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (r)
+ return r;
+ spin_lock(&vm->status_lock);
+ }
+
while (!list_empty(&vm->invalidated)) {
- bo_va = list_first_entry(&vm->invalidated,
- struct amdgpu_bo_va, vm_status);
+ bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
+ base.vm_status);
+ resv = bo_va->base.bo->tbo.base.resv;
spin_unlock(&vm->status_lock);
- r = amdgpu_vm_bo_update(adev, bo_va, true);
+ /* Try to reserve the BO to avoid clearing its ptes */
+ if (!adev->debug_vm && dma_resv_trylock(resv)) {
+ clear = false;
+ unlock = true;
+ /* The caller is already holding the reservation lock */
+ } else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
+ clear = false;
+ unlock = false;
+ /* Somebody else is using the BO right now */
+ } else {
+ clear = true;
+ unlock = false;
+ }
+
+ r = amdgpu_vm_bo_update(adev, bo_va, clear);
+
+ if (unlock)
+ dma_resv_unlock(resv);
if (r)
return r;
+ /* Remember evicted DMABuf imports in compute VMs for later
+ * validation
+ */
+ if (vm->is_compute_context &&
+ drm_gem_is_imported(&bo_va->base.bo->tbo.base) &&
+ (!bo_va->base.bo->tbo.resource ||
+ bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
+ amdgpu_vm_bo_evicted_user(&bo_va->base);
+
spin_lock(&vm->status_lock);
}
spin_unlock(&vm->status_lock);
- if (bo_va)
- r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
+ return 0;
+}
+/**
+ * amdgpu_vm_flush_compute_tlb - Flush TLB on compute VM
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ * @flush_type: flush type
+ * @xcc_mask: mask of XCCs that belong to the compute partition in need of a TLB flush.
+ *
+ * Flush TLB if needed for a compute VM.
+ *
+ * Returns:
+ * 0 for success.
+ */
+int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ uint32_t flush_type,
+ uint32_t xcc_mask)
+{
+ uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
+ bool all_hub = false;
+ int xcc = 0, r = 0;
+
+ WARN_ON_ONCE(!vm->is_compute_context);
+
+ /*
+ * It can be that we race and lose here, but that is extremely unlikely
+ * and the worst thing which could happen is that we flush the changes
+ * into the TLB once more which is harmless.
+ */
+ if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
+ return 0;
+
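+ /* Vega and Raven based parts need the flush on every VM hub */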
+ if (adev->family == AMDGPU_FAMILY_AI ||
+ adev->family == AMDGPU_FAMILY_RV)
+ all_hub = true;
+
+ for_each_inst(xcc, xcc_mask) {
+ r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
+ all_hub, xcc);
+ if (r)
+ break;
+ }
return r;
}
@@ -1704,7 +1720,9 @@ int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
*
* Add @bo into the requested vm.
* Add @bo to the list of bos associated with the vm
- * Returns newly added bo_va or NULL for failure
+ *
+ * Returns:
+ * Newly added bo_va or NULL for failure
*
* Object has to be reserved!
*/
@@ -1718,20 +1736,87 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
if (bo_va == NULL) {
return NULL;
}
- bo_va->vm = vm;
- bo_va->bo = bo;
+ amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
+
bo_va->ref_count = 1;
- INIT_LIST_HEAD(&bo_va->bo_list);
+ bo_va->last_pt_update = dma_fence_get_stub();
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
- INIT_LIST_HEAD(&bo_va->vm_status);
- if (bo)
- list_add_tail(&bo_va->bo_list, &bo->va);
+ if (!bo)
+ return bo_va;
+
+ dma_resv_assert_held(bo->tbo.base.resv);
+ if (amdgpu_dmabuf_is_xgmi_accessible(adev, bo)) {
+ bo_va->is_xgmi = true;
+ /* Power up XGMI if it can be potentially used */
+ amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
+ }
return bo_va;
}
+
+/**
+ * amdgpu_vm_bo_insert_map - insert a new mapping
+ *
+ * @adev: amdgpu_device pointer
+ * @bo_va: bo_va to store the address
+ * @mapping: the mapping to insert
+ *
+ * Insert a new mapping into all structures.
+ */
+static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ struct amdgpu_bo_va_mapping *mapping)
+{
+ struct amdgpu_vm *vm = bo_va->base.vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+
+ mapping->bo_va = bo_va;
+ list_add(&mapping->list, &bo_va->invalids);
+ amdgpu_vm_it_insert(mapping, &vm->va);
+
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT)
+ amdgpu_vm_prt_get(adev);
+
+ if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
+ amdgpu_vm_bo_moved(&bo_va->base);
+
+ trace_amdgpu_vm_bo_map(bo_va, mapping);
+}
+
+/* Validate operation parameters to prevent potential abuse */
+static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
+ struct amdgpu_bo *bo,
+ uint64_t saddr,
+ uint64_t offset,
+ uint64_t size)
+{
+ uint64_t tmp, lpfn;
+
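+ /* Start address, offset and size must all be GPU page aligned */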
+ if (saddr & AMDGPU_GPU_PAGE_MASK
+ || offset & AMDGPU_GPU_PAGE_MASK
+ || size & AMDGPU_GPU_PAGE_MASK)
+ return -EINVAL;
+
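+ /* Reject empty ranges and ranges that would overflow */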
+ if (check_add_overflow(saddr, size, &tmp)
+ || check_add_overflow(offset, size, &tmp)
+ || size == 0 /* which also leads to end < begin */)
+ return -EINVAL;
+
+ /* make sure the object fits at this offset */
+ if (bo && offset + size > amdgpu_bo_size(bo))
+ return -EINVAL;
+
+ /* Ensure the last pfn does not exceed max_pfn */
+ lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
+ if (lpfn >= adev->vm_manager.max_pfn)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@@ -1739,41 +1824,39 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
* @bo_va: bo_va to store the address
* @saddr: where to map the BO
* @offset: requested offset in the BO
+ * @size: BO size in bytes
* @flags: attributes of pages (read/write/valid/etc.)
*
* Add a mapping of the BO at the specified addr into the VM.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*
* Object has to be reserved and unreserved outside!
*/
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+ struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
+ int r;
- /* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
- size == 0 || size & AMDGPU_GPU_PAGE_MASK)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if (saddr >= eaddr ||
- (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
/* bo and tmp overlap, invalid addr */
dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
- "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
+ "0x%010Lx-0x%010Lx\n", bo, saddr, eaddr,
tmp->start, tmp->last + 1);
return -EINVAL;
}
@@ -1782,17 +1865,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (!mapping)
return -ENOMEM;
- INIT_LIST_HEAD(&mapping->list);
mapping->start = saddr;
mapping->last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
- list_add(&mapping->list, &bo_va->invalids);
- amdgpu_vm_it_insert(mapping, &vm->va);
-
- if (flags & AMDGPU_PTE_PRT)
- amdgpu_vm_prt_get(adev);
+ amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
return 0;
}
@@ -1804,59 +1882,51 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
* @bo_va: bo_va to store the address
* @saddr: where to map the BO
* @offset: requested offset in the BO
+ * @size: BO size in bytes
* @flags: attributes of pages (read/write/valid/etc.)
*
* Add a mapping of the BO at the specified addr into the VM. Replace existing
* mappings as we do so.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*
* Object has to be reserved and unreserved outside!
*/
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t saddr, uint64_t offset,
- uint64_t size, uint64_t flags)
+ uint64_t size, uint32_t flags)
{
struct amdgpu_bo_va_mapping *mapping;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
uint64_t eaddr;
int r;
- /* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
- size == 0 || size & AMDGPU_GPU_PAGE_MASK)
- return -EINVAL;
-
- /* make sure object fit at this offset */
- eaddr = saddr + size - 1;
- if (saddr >= eaddr ||
- (bo_va->bo && offset + size > amdgpu_bo_size(bo_va->bo)))
- return -EINVAL;
+ r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
+ if (r)
+ return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
if (!mapping)
return -ENOMEM;
- r = amdgpu_vm_bo_clear_mappings(adev, bo_va->vm, saddr, size);
+ r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
if (r) {
kfree(mapping);
return r;
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
mapping->start = saddr;
mapping->last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
- list_add(&mapping->list, &bo_va->invalids);
- amdgpu_vm_it_insert(mapping, &vm->va);
-
- if (flags & AMDGPU_PTE_PRT)
- amdgpu_vm_prt_get(adev);
+ amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
return 0;
}
@@ -1869,7 +1939,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
* @saddr: where the BO is mapped
*
* Remove a mapping of the BO at the specified addr from the VM.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*
* Object has to be reserved and unreserved outside!
*/
@@ -1878,7 +1950,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
uint64_t saddr)
{
struct amdgpu_bo_va_mapping *mapping;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_vm *vm = bo_va->base.vm;
bool valid = true;
saddr /= AMDGPU_GPU_PAGE_SIZE;
@@ -1902,6 +1974,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);
+ mapping->bo_va = NULL;
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
if (valid)
@@ -1922,7 +1995,9 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
* @size: size of the range
*
* Remove all mappings in a range, split them as appropriate.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*/
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
@@ -1931,10 +2006,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
LIST_HEAD(removed);
uint64_t eaddr;
+ int r;
+
+ r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
+ if (r)
+ return r;
- eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);
@@ -1958,7 +2037,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
before->last = saddr - 1;
before->offset = tmp->offset;
before->flags = tmp->flags;
- list_add(&before->list, &tmp->list);
+ before->bo_va = tmp->bo_va;
+ list_add(&before->list, &tmp->bo_va->invalids);
}
/* Remember mapping split at the end */
@@ -1966,9 +2046,10 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
after->start = eaddr + 1;
after->last = tmp->last;
after->offset = tmp->offset;
- after->offset += after->start - tmp->start;
+ after->offset += (after->start - tmp->start) << PAGE_SHIFT;
after->flags = tmp->flags;
- list_add(&after->list, &tmp->list);
+ after->bo_va = tmp->bo_va;
+ list_add(&after->list, &tmp->bo_va->invalids);
}
list_del(&tmp->list);
@@ -1987,24 +2068,37 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
if (tmp->last > eaddr)
tmp->last = eaddr;
+ tmp->bo_va = NULL;
list_add(&tmp->list, &vm->freed);
trace_amdgpu_vm_bo_unmap(NULL, tmp);
}
/* Insert partial mapping before the range */
if (!list_empty(&before->list)) {
+ struct amdgpu_bo *bo = before->bo_va->base.bo;
+
amdgpu_vm_it_insert(before, &vm->va);
- if (before->flags & AMDGPU_PTE_PRT)
+ if (before->flags & AMDGPU_PTE_PRT_FLAG(adev))
amdgpu_vm_prt_get(adev);
+
+ if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
+ !before->bo_va->base.moved)
+ amdgpu_vm_bo_moved(&before->bo_va->base);
} else {
kfree(before);
}
/* Insert partial mapping after the range */
if (!list_empty(&after->list)) {
+ struct amdgpu_bo *bo = after->bo_va->base.bo;
+
amdgpu_vm_it_insert(after, &vm->va);
- if (after->flags & AMDGPU_PTE_PRT)
+ if (after->flags & AMDGPU_PTE_PRT_FLAG(adev))
amdgpu_vm_prt_get(adev);
+
+ if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
+ !after->bo_va->base.moved)
+ amdgpu_vm_bo_moved(&after->bo_va->base);
} else {
kfree(after);
}
@@ -2013,7 +2107,55 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
}
/**
- * amdgpu_vm_bo_rmv - remove a bo to a specific vm
+ * amdgpu_vm_bo_lookup_mapping - find mapping by address
+ *
+ * @vm: the requested VM
+ * @addr: the address
+ *
+ * Find a mapping by its address.
+ *
+ * Returns:
+ * The amdgpu_bo_va_mapping matching for addr or NULL
+ *
+ */
+struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+ uint64_t addr)
+{
+ return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
+}
+
+/**
+ * amdgpu_vm_bo_trace_cs - trace all reserved mappings
+ *
+ * @vm: the requested vm
+ * @ticket: CS ticket
+ *
+ * Trace all mappings of BOs reserved during a command submission.
+ */
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+
+ if (!trace_amdgpu_vm_bo_cs_enabled())
+ return;
+
+ for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
+ mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
+ if (mapping->bo_va && mapping->bo_va->base.bo) {
+ struct amdgpu_bo *bo;
+
+ bo = mapping->bo_va->base.bo;
+ if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
+ ticket)
+ continue;
+ }
+
+ trace_amdgpu_vm_bo_cs(mapping);
+ }
+}
+
+/**
+ * amdgpu_vm_bo_del - remove a bo from a specific vm
*
* @adev: amdgpu_device pointer
* @bo_va: requested bo_va
@@ -2022,21 +2164,40 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
*
* Object has to be reserved!
*/
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+void amdgpu_vm_bo_del(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va)
{
struct amdgpu_bo_va_mapping *mapping, *next;
- struct amdgpu_vm *vm = bo_va->vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+ struct amdgpu_vm *vm = bo_va->base.vm;
+ struct amdgpu_vm_bo_base **base;
+
+ dma_resv_assert_held(vm->root.bo->tbo.base.resv);
+
+ if (bo) {
+ dma_resv_assert_held(bo->tbo.base.resv);
+ if (amdgpu_vm_is_bo_always_valid(vm, bo))
+ ttm_bo_set_bulk_move(&bo->tbo, NULL);
- list_del(&bo_va->bo_list);
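+ /* Unlink this bo_va from the BO's singly linked vm_bo list */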
+ for (base = &bo_va->base.bo->vm_bo; *base;
+ base = &(*base)->next) {
+ if (*base != &bo_va->base)
+ continue;
+
+ amdgpu_vm_update_stats(*base, bo->tbo.resource, -1);
+ *base = bo_va->base.next;
+ break;
+ }
+ }
spin_lock(&vm->status_lock);
- list_del(&bo_va->vm_status);
+ list_del(&bo_va->base.vm_status);
spin_unlock(&vm->status_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);
+ mapping->bo_va = NULL;
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
list_add(&mapping->list, &vm->freed);
}
@@ -2048,31 +2209,114 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
}
dma_fence_put(bo_va->last_pt_update);
+
+ if (bo && bo_va->is_xgmi)
+ amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
+
kfree(bo_va);
}
/**
+ * amdgpu_vm_evictable - check if we can evict a VM
+ *
+ * @bo: A page table of the VM.
+ *
+ * Check if it is possible to evict a VM.
+ */
+bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
+{
+ struct amdgpu_vm_bo_base *bo_base = bo->vm_bo;
+
+ /* Page tables of a destroyed VM can go away immediately */
+ if (!bo_base || !bo_base->vm)
+ return true;
+
+ /* Don't evict VM page tables while they are busy */
+ if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
+ return false;
+
+ /* Try to block ongoing updates */
+ if (!amdgpu_vm_eviction_trylock(bo_base->vm))
+ return false;
+
+ /* Don't evict VM page tables while they are updated */
+ if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
+ amdgpu_vm_eviction_unlock(bo_base->vm);
+ return false;
+ }
+
+ bo_base->vm->evicting = true;
+ amdgpu_vm_eviction_unlock(bo_base->vm);
+ return true;
+}
+
+/**
* amdgpu_vm_bo_invalidate - mark the bo as invalid
*
- * @adev: amdgpu_device pointer
- * @vm: requested vm
* @bo: amdgpu buffer object
+ * @evicted: is the BO evicted
*
* Mark @bo as invalid.
*/
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo)
+void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted)
{
- struct amdgpu_bo_va *bo_va;
+ struct amdgpu_vm_bo_base *bo_base;
+
+ for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+ struct amdgpu_vm *vm = bo_base->vm;
+
+ if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
+ amdgpu_vm_bo_evicted(bo_base);
+ continue;
+ }
+
+ if (bo_base->moved)
+ continue;
+ bo_base->moved = true;
- list_for_each_entry(bo_va, &bo->va, bo_list) {
- spin_lock(&bo_va->vm->status_lock);
- if (list_empty(&bo_va->vm_status))
- list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
- spin_unlock(&bo_va->vm->status_lock);
+ if (bo->tbo.type == ttm_bo_type_kernel)
+ amdgpu_vm_bo_relocated(bo_base);
+ else if (amdgpu_vm_is_bo_always_valid(vm, bo))
+ amdgpu_vm_bo_moved(bo_base);
+ else
+ amdgpu_vm_bo_invalidated(bo_base);
}
}
+/**
+ * amdgpu_vm_bo_move - handle BO move
+ *
+ * @bo: amdgpu buffer object
+ * @new_mem: the new placement of the BO move
+ * @evicted: is the BO evicted
+ *
+ * Update the memory stats for the new placement and mark @bo as invalid.
+ */
+void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
+ bool evicted)
+{
+ struct amdgpu_vm_bo_base *bo_base;
+
+ for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+ struct amdgpu_vm *vm = bo_base->vm;
+
+ spin_lock(&vm->status_lock);
+ amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
+ amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
+ spin_unlock(&vm->status_lock);
+ }
+
+ amdgpu_vm_bo_invalidate(bo, evicted);
+}
+
+/**
+ * amdgpu_vm_get_block_size - calculate VM page table size as power of two
+ *
+ * @vm_size: VM size
+ *
+ * Returns:
+ * VM page table size as power of two
+ */
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
/* Total bits covered by PD + PTs */
@@ -2087,28 +2331,216 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
}
/**
- * amdgpu_vm_adjust_size - adjust vm size and block size
+ * amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
*
* @adev: amdgpu_device pointer
- * @vm_size: the default vm size if it's set auto
+ * @min_vm_size: the minimum vm size in GB if it's set auto
+ * @fragment_size_default: Default PTE fragment size
+ * @max_level: max VMPT level
+ * @max_bits: max address space size in bits
+ *
*/
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
+ uint32_t fragment_size_default, unsigned max_level,
+ unsigned max_bits)
{
- /* adjust vm size firstly */
- if (amdgpu_vm_size == -1)
- adev->vm_manager.vm_size = vm_size;
- else
- adev->vm_manager.vm_size = amdgpu_vm_size;
+ unsigned int max_size = 1 << (max_bits - 30);
+ unsigned int vm_size;
+ uint64_t tmp;
+
+ /* adjust vm size first */
+ if (amdgpu_vm_size != -1) {
+ vm_size = amdgpu_vm_size;
+ if (vm_size > max_size) {
+ dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
+ amdgpu_vm_size, max_size);
+ vm_size = max_size;
+ }
+ } else {
+ struct sysinfo si;
+ unsigned int phys_ram_gb;
+
+ /* Optimal VM size depends on the amount of physical
+ * RAM available. Underlying requirements and
+ * assumptions:
+ *
+ * - Need to map system memory and VRAM from all GPUs
+ * - VRAM from other GPUs not known here
+ * - Assume VRAM <= system memory
+ * - On GFX8 and older, VM space can be segmented for
+ * different MTYPEs
+ * - Need to allow room for fragmentation, guard pages etc.
+ *
+ * This adds up to a rough guess of system memory x3.
+ * Round up to power of two to maximize the available
+ * VM size with the given page table size.
+ */
+ si_meminfo(&si);
+ phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
+ (1 << 30) - 1) >> 30;
+ vm_size = roundup_pow_of_two(
+ clamp(phys_ram_gb * 3, min_vm_size, max_size));
+ }
- /* block size depends on vm size */
- if (amdgpu_vm_block_size == -1)
+ adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
+
+ tmp = roundup_pow_of_two(adev->vm_manager.max_pfn);
+ if (amdgpu_vm_block_size != -1)
+ tmp >>= amdgpu_vm_block_size - 9;
+ tmp = DIV_ROUND_UP(fls64(tmp) - 1, 9) - 1;
+ adev->vm_manager.num_level = min_t(unsigned int, max_level, tmp);
+ switch (adev->vm_manager.num_level) {
+ case 3:
+ adev->vm_manager.root_level = AMDGPU_VM_PDB2;
+ break;
+ case 2:
+ adev->vm_manager.root_level = AMDGPU_VM_PDB1;
+ break;
+ case 1:
+ adev->vm_manager.root_level = AMDGPU_VM_PDB0;
+ break;
+ default:
+ dev_err(adev->dev, "VMPT only supports 2~4+1 levels\n");
+ }
+ /* block size depends on vm size and hw setup */
+ if (amdgpu_vm_block_size != -1)
adev->vm_manager.block_size =
- amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
+ min((unsigned)amdgpu_vm_block_size, max_bits
+ - AMDGPU_GPU_PAGE_SHIFT
+ - 9 * adev->vm_manager.num_level);
+ else if (adev->vm_manager.num_level > 1)
+ adev->vm_manager.block_size = 9;
+ else
+ adev->vm_manager.block_size = amdgpu_vm_get_block_size(tmp);
+
+ if (amdgpu_vm_fragment_size == -1)
+ adev->vm_manager.fragment_size = fragment_size_default;
else
- adev->vm_manager.block_size = amdgpu_vm_block_size;
+ adev->vm_manager.fragment_size = amdgpu_vm_fragment_size;
+
+ dev_info(
+ adev->dev,
+ "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n",
+ vm_size, adev->vm_manager.num_level + 1,
+ adev->vm_manager.block_size, adev->vm_manager.fragment_size);
+}
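+
+/*
+ * Illustrative example (editor's note, not part of this change): assuming a
+ * hypothetical configuration with 16 GB of system RAM, amdgpu_vm_size == -1,
+ * amdgpu_vm_block_size == -1, min_vm_size = 4, max_level = 3 and
+ * max_bits = 48, the code above works out to roughly:
+ *
+ *	vm_size    = roundup_pow_of_two(clamp(16 * 3, 4, 1 << 18)) = 64 GB
+ *	max_pfn    = 64ULL << 18	(4 KiB GPU pages)
+ *	num_level  = min(3, DIV_ROUND_UP(fls64(max_pfn) - 1, 9) - 1) = 2
+ *	root_level = AMDGPU_VM_PDB1, block_size = 9, since num_level > 1
+ */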
+
+/**
+ * amdgpu_vm_wait_idle - wait for the VM to become idle
+ *
+ * @vm: VM object to wait for
+ * @timeout: timeout to wait for VM to become idle
+ */
+long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
+{
+ timeout = drm_sched_entity_flush(&vm->immediate, timeout);
+ if (timeout <= 0)
+ return timeout;
+
+ return drm_sched_entity_flush(&vm->delayed, timeout);
+}
+
+static void amdgpu_vm_destroy_task_info(struct kref *kref)
+{
+ struct amdgpu_task_info *ti = container_of(kref, struct amdgpu_task_info, refcount);
+
+ kfree(ti);
+}
+
+static inline struct amdgpu_vm *
+amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
+{
+ struct amdgpu_vm *vm;
+ unsigned long flags;
+
+ xa_lock_irqsave(&adev->vm_manager.pasids, flags);
+ vm = xa_load(&adev->vm_manager.pasids, pasid);
+ xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
+
+ return vm;
+}
+
+/**
+ * amdgpu_vm_put_task_info - reference down the vm task_info ptr
+ *
+ * @task_info: task_info struct under discussion.
+ *
+ * Frees the vm task_info ptr at the last put.
+ */
+void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
+{
+ if (task_info)
+ kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
+}
+
+/**
+ * amdgpu_vm_get_task_info_vm - Extracts task info for a vm.
+ *
+ * @vm: VM to get info from
+ *
+ * Returns the reference counted task_info structure, which must be
+ * released with amdgpu_vm_put_task_info().
+ */
+struct amdgpu_task_info *
+amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
+{
+ struct amdgpu_task_info *ti = NULL;
+
+ if (vm) {
+ ti = vm->task_info;
+ kref_get(&vm->task_info->refcount);
+ }
- DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
- adev->vm_manager.vm_size, adev->vm_manager.block_size);
+ return ti;
+}
+
+/**
+ * amdgpu_vm_get_task_info_pasid - Extracts task info for a PASID.
+ *
+ * @adev: amdgpu device pointer
+ * @pasid: PASID identifier for VM
+ *
+ * Returns the reference counted task_info structure, which must be
+ * released with amdgpu_vm_put_task_info().
+ */
+struct amdgpu_task_info *
+amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid)
+{
+ return amdgpu_vm_get_task_info_vm(
+ amdgpu_vm_get_vm_from_pasid(adev, pasid));
+}
+
+static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
+{
+ vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
+ if (!vm->task_info)
+ return -ENOMEM;
+
+ kref_init(&vm->task_info->refcount);
+ return 0;
+}
+
+/**
+ * amdgpu_vm_set_task_info - Sets VMs task info.
+ *
+ * @vm: vm for which to set the info
+ */
+void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
+{
+ if (!vm->task_info)
+ return;
+
+ if (vm->task_info->task.pid == current->pid)
+ return;
+
+ vm->task_info->task.pid = current->pid;
+ get_task_comm(vm->task_info->task.comm, current);
+
+ if (current->group_leader->mm != current->mm)
+ return;
+
+ vm->task_info->tgid = current->group_leader->pid;
+ get_task_comm(vm->task_info->process_name, current->group_leader);
}
/**
@@ -2116,89 +2548,193 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @xcp_id: GPU partition selection id
+ * @pasid: the pasid the VM is using on this GPU
*
* Init @vm fields.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*/
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int32_t xcp_id, uint32_t pasid)
{
- const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
- AMDGPU_VM_PTE_COUNT(adev) * 8);
- unsigned ring_instance;
- struct amdgpu_ring *ring;
- struct amd_sched_rq *rq;
- int r;
-
- vm->va = RB_ROOT;
- vm->client_id = atomic64_inc_return(&adev->vm_manager.client_counter);
- spin_lock_init(&vm->status_lock);
+ struct amdgpu_bo *root_bo;
+ struct amdgpu_bo_vm *root;
+ int r, i;
+
+ vm->va = RB_ROOT_CACHED;
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
+ vm->reserved_vmid[i] = NULL;
+ INIT_LIST_HEAD(&vm->evicted);
+ INIT_LIST_HEAD(&vm->evicted_user);
+ INIT_LIST_HEAD(&vm->relocated);
+ INIT_LIST_HEAD(&vm->moved);
+ INIT_LIST_HEAD(&vm->idle);
INIT_LIST_HEAD(&vm->invalidated);
- INIT_LIST_HEAD(&vm->cleared);
+ spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->freed);
+ INIT_LIST_HEAD(&vm->done);
+ INIT_KFIFO(vm->faults);
- /* create scheduler entity for page table updates */
-
- ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
- ring_instance %= adev->vm_manager.vm_pte_num_rings;
- ring = adev->vm_manager.vm_pte_rings[ring_instance];
- rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
- r = amd_sched_entity_init(&ring->sched, &vm->entity,
- rq, amdgpu_sched_jobs);
+ r = amdgpu_vm_init_entities(adev, vm);
if (r)
return r;
- vm->last_dir_update = NULL;
+ ttm_lru_bulk_move_init(&vm->lru_bulk_move);
+
+ vm->is_compute_context = false;
+
+ vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
+ AMDGPU_VM_USE_CPU_FOR_GFX);
+
+ dev_dbg(adev->dev, "VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
+ WARN_ONCE((vm->use_cpu_for_update &&
+ !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ "CPU update of VM recommended only for large BAR system\n");
+
+ if (vm->use_cpu_for_update)
+ vm->update_funcs = &amdgpu_vm_cpu_funcs;
+ else
+ vm->update_funcs = &amdgpu_vm_sdma_funcs;
+
+ vm->last_update = dma_fence_get_stub();
+ vm->last_unlocked = dma_fence_get_stub();
+ vm->last_tlb_flush = dma_fence_get_stub();
+ vm->generation = amdgpu_vm_generation(adev, NULL);
+
+ mutex_init(&vm->eviction_lock);
+ vm->evicting = false;
+ vm->tlb_fence_context = dma_fence_context_alloc(1);
- r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_VRAM_CLEARED,
- NULL, NULL, &vm->root.bo);
+ r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
+ false, &root, xcp_id);
if (r)
- goto error_free_sched_entity;
+ goto error_free_delayed;
- r = amdgpu_bo_reserve(vm->root.bo, false);
+ root_bo = amdgpu_bo_ref(&root->bo);
+ r = amdgpu_bo_reserve(root_bo, true);
+ if (r) {
+ amdgpu_bo_unref(&root_bo);
+ goto error_free_delayed;
+ }
+
+ amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
+ r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
+ if (r)
+ goto error_free_root;
+
+ r = amdgpu_vm_pt_clear(adev, vm, root, false);
if (r)
goto error_free_root;
- vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
+ r = amdgpu_vm_create_task_info(vm);
+ if (r)
+ dev_dbg(adev->dev, "Failed to create task info for VM\n");
+
+ /* Store new PASID in XArray (if non-zero) */
+ if (pasid != 0) {
+ r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL));
+ if (r < 0)
+ goto error_free_root;
+
+ vm->pasid = pasid;
+ }
+
amdgpu_bo_unreserve(vm->root.bo);
+ amdgpu_bo_unref(&root_bo);
return 0;
error_free_root:
- amdgpu_bo_unref(&vm->root.bo->shadow);
- amdgpu_bo_unref(&vm->root.bo);
- vm->root.bo = NULL;
+ /* If PASID was partially set, erase it from XArray before failing */
+ if (vm->pasid != 0) {
+ xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
+ vm->pasid = 0;
+ }
+ amdgpu_vm_pt_free_root(adev, vm);
+ amdgpu_bo_unreserve(vm->root.bo);
+ amdgpu_bo_unref(&root_bo);
-error_free_sched_entity:
- amd_sched_entity_fini(&ring->sched, &vm->entity);
+error_free_delayed:
+ dma_fence_put(vm->last_tlb_flush);
+ dma_fence_put(vm->last_unlocked);
+ ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
+ amdgpu_vm_fini_entities(vm);
return r;
}
/**
- * amdgpu_vm_free_levels - free PD/PT levels
+ * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
*
- * @level: PD/PT starting level to free
+ * This only works on GFX VMs that don't have any BOs added and no
+ * page tables allocated yet.
*
- * Free the page directory or page table level and all sub levels.
+ * Changes the following VM parameters:
+ * - use_cpu_for_update
+ * - pte_supports_ats
+ *
+ * Reinitializes the page directory to reflect the changed ATS
+ * setting.
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
*/
-static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
+int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- unsigned i;
+ int r;
- if (level->bo) {
- amdgpu_bo_unref(&level->bo->shadow);
- amdgpu_bo_unref(&level->bo);
+ r = amdgpu_bo_reserve(vm->root.bo, true);
+ if (r)
+ return r;
+
+ /* Update VM state */
+ vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
+ AMDGPU_VM_USE_CPU_FOR_COMPUTE);
+ dev_dbg(adev->dev, "VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
+ WARN_ONCE((vm->use_cpu_for_update &&
+ !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ "CPU update of VM recommended only for large BAR system\n");
+
+ if (vm->use_cpu_for_update) {
+ /* Sync with last SDMA update/clear before switching to CPU */
+ r = amdgpu_bo_sync_wait(vm->root.bo,
+ AMDGPU_FENCE_OWNER_UNDEFINED, true);
+ if (r)
+ goto unreserve_bo;
+
+ vm->update_funcs = &amdgpu_vm_cpu_funcs;
+ r = amdgpu_vm_pt_map_tables(adev, vm);
+ if (r)
+ goto unreserve_bo;
+
+ } else {
+ vm->update_funcs = &amdgpu_vm_sdma_funcs;
}
- if (level->entries)
- for (i = 0; i <= level->last_entry_used; i++)
- amdgpu_vm_free_levels(&level->entries[i]);
+ dma_fence_put(vm->last_update);
+ vm->last_update = dma_fence_get_stub();
+ vm->is_compute_context = true;
- drm_free_large(level->entries);
+unreserve_bo:
+ amdgpu_bo_unreserve(vm->root.bo);
+ return r;
+}
+
+static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
+{
+ for (int i = 0; i < __AMDGPU_PL_NUM; ++i) {
+ if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
+ vm->stats[i].evicted == 0))
+ return false;
+ }
+ return true;
}
/**
@@ -2213,20 +2749,30 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
- bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+ bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
+ struct amdgpu_bo *root;
+ unsigned long flags;
+ int i;
- amd_sched_entity_fini(vm->entity.sched, &vm->entity);
+ amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
- if (!RB_EMPTY_ROOT(&vm->va)) {
- dev_err(adev->dev, "still active bo inside vm\n");
- }
- rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
- list_del(&mapping->list);
- amdgpu_vm_it_remove(mapping, &vm->va);
- kfree(mapping);
+ root = amdgpu_bo_ref(vm->root.bo);
+ amdgpu_bo_reserve(root, true);
+ /* Remove PASID mapping before destroying VM */
+ if (vm->pasid != 0) {
+ xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
+ vm->pasid = 0;
}
+ dma_fence_wait(vm->last_unlocked, false);
+ dma_fence_put(vm->last_unlocked);
+ dma_fence_wait(vm->last_tlb_flush, false);
+ /* Make sure that all fence callbacks have completed */
+ spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
+ spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
+ dma_fence_put(vm->last_tlb_flush);
+
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
- if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
+ if (mapping->flags & AMDGPU_VM_PAGE_PRT && prt_fini_needed) {
amdgpu_vm_prt_fini(adev, vm);
prt_fini_needed = false;
}
@@ -2235,8 +2781,42 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
}
- amdgpu_vm_free_levels(&vm->root);
- dma_fence_put(vm->last_dir_update);
+ amdgpu_vm_pt_free_root(adev, vm);
+ amdgpu_bo_unreserve(root);
+ amdgpu_bo_unref(&root);
+ WARN_ON(vm->root.bo);
+
+ amdgpu_vm_fini_entities(vm);
+
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
+ dev_err(adev->dev, "still active bo inside vm\n");
+ }
+ rbtree_postorder_for_each_entry_safe(mapping, tmp,
+ &vm->va.rb_root, rb) {
+ /* Don't remove the mapping here, we don't want to trigger a
+ * rebalance and the tree is about to be destroyed anyway.
+ */
+ list_del(&mapping->list);
+ kfree(mapping);
+ }
+
+ dma_fence_put(vm->last_update);
+
+ for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
+ amdgpu_vmid_free_reserved(adev, vm, i);
+ }
+
+ ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
+
+ if (!amdgpu_vm_stats_is_zero(vm)) {
+ struct amdgpu_task_info *ti = vm->task_info;
+
+ dev_warn(adev->dev,
+ "VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
+ ti->process_name, ti->task.pid, ti->task.comm, ti->tgid);
+ }
+
+ amdgpu_vm_put_task_info(vm->task_info);
}
/**
@@ -2248,33 +2828,45 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
*/
void amdgpu_vm_manager_init(struct amdgpu_device *adev)
{
- unsigned i, j;
-
- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
- struct amdgpu_vm_id_manager *id_mgr =
- &adev->vm_manager.id_mgr[i];
-
- mutex_init(&id_mgr->lock);
- INIT_LIST_HEAD(&id_mgr->ids_lru);
+ unsigned i;
- /* skip over VMID 0, since it is the system VM */
- for (j = 1; j < id_mgr->num_ids; ++j) {
- amdgpu_vm_reset_id(adev, i, j);
- amdgpu_sync_create(&id_mgr->ids[i].active);
- list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
- }
- }
+ /* Concurrent flushes are only possible starting with Vega10 and
+ * are broken on Navi10 and Navi14.
+ */
+ adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
+ adev->asic_type == CHIP_NAVI10 ||
+ adev->asic_type == CHIP_NAVI14);
+ amdgpu_vmid_mgr_init(adev);
adev->vm_manager.fence_context =
dma_fence_context_alloc(AMDGPU_MAX_RINGS);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
adev->vm_manager.seqno[i] = 0;
-
- atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
- atomic64_set(&adev->vm_manager.client_counter, 0);
spin_lock_init(&adev->vm_manager.prt_lock);
atomic_set(&adev->vm_manager.num_prt_users, 0);
+
+ /* If not overridden by the user, compute VM tables are updated by the
+ * CPU by default, but only on large BAR systems
+ */
+#ifdef CONFIG_X86_64
+ if (amdgpu_vm_update_mode == -1) {
+ /* For asic with VF MMIO access protection
+ * avoid using CPU for VM table updates
+ */
+ if (amdgpu_gmc_vram_full_visible(&adev->gmc) &&
+ !amdgpu_sriov_vf_mmio_access_protection(adev))
+ adev->vm_manager.vm_update_mode =
+ AMDGPU_VM_USE_CPU_FOR_COMPUTE;
+ else
+ adev->vm_manager.vm_update_mode = 0;
+ } else
+ adev->vm_manager.vm_update_mode = amdgpu_vm_update_mode;
+#else
+ adev->vm_manager.vm_update_mode = 0;
+#endif
+
+ xa_init_flags(&adev->vm_manager.pasids, XA_FLAGS_LOCK_IRQ);
}
/**
@@ -2286,19 +2878,325 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
*/
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
- unsigned i, j;
+ WARN_ON(!xa_empty(&adev->vm_manager.pasids));
+ xa_destroy(&adev->vm_manager.pasids);
+
+ amdgpu_vmid_mgr_fini(adev);
+}
+
+/**
+ * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_vm
+ * @filp: drm file pointer
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
+int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
+{
+ union drm_amdgpu_vm *args = data;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
+
+ /* No valid flags defined yet */
+ if (args->in.flags)
+ return -EINVAL;
+
+ switch (args->in.op) {
+ case AMDGPU_VM_OP_RESERVE_VMID:
+ /* We only need to reserve a vmid from the gfxhub */
+ amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
+ break;
+ case AMDGPU_VM_OP_UNRESERVE_VMID:
+ amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
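+
+/*
+ * Illustrative userspace sketch (editor's note, not part of this change;
+ * assumes the libdrm helper drmCommandWriteRead and the uapi union
+ * drm_amdgpu_vm): reserving a VMID for the calling process could look like:
+ *
+ *	union drm_amdgpu_vm args = {
+ *		.in = { .op = AMDGPU_VM_OP_RESERVE_VMID, .flags = 0 },
+ *	};
+ *	int r = drmCommandWriteRead(fd, DRM_AMDGPU_VM, &args, sizeof(args));
+ */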
+
+/**
+ * amdgpu_vm_handle_fault - graceful handling of VM faults.
+ * @adev: amdgpu device pointer
+ * @pasid: PASID of the VM
+ * @ts: Timestamp of the fault
+ * @vmid: VMID, only used for GFX 9.4.3.
+ * @node_id: Node_id received in IH cookie. Only applicable for
+ * GFX 9.4.3.
+ * @addr: Address of the fault
+ * @write_fault: true if it is a write fault, false if a read fault
+ *
+ * Try to gracefully handle a VM fault. Return true if the fault was handled and
+ * shouldn't be reported any more.
+ */
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
+ u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
+ bool write_fault)
+{
+ bool is_compute_context = false;
+ struct amdgpu_bo *root;
+ unsigned long irqflags;
+ uint64_t value, flags;
+ struct amdgpu_vm *vm;
+ int r;
+
+ xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
+ vm = xa_load(&adev->vm_manager.pasids, pasid);
+ if (vm) {
+ root = amdgpu_bo_ref(vm->root.bo);
+ is_compute_context = vm->is_compute_context;
+ } else {
+ root = NULL;
+ }
+ xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
- for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
- struct amdgpu_vm_id_manager *id_mgr =
- &adev->vm_manager.id_mgr[i];
+ if (!root)
+ return false;
+
+ addr /= AMDGPU_GPU_PAGE_SIZE;
+
+ if (is_compute_context && !svm_range_restore_pages(adev, pasid, vmid,
+ node_id, addr, ts, write_fault)) {
+ amdgpu_bo_unref(&root);
+ return true;
+ }
- mutex_destroy(&id_mgr->lock);
- for (j = 0; j < AMDGPU_NUM_VM; ++j) {
- struct amdgpu_vm_id *id = &id_mgr->ids[j];
+ r = amdgpu_bo_reserve(root, true);
+ if (r)
+ goto error_unref;
+
+ /* Double check that the VM still exists */
+ xa_lock_irqsave(&adev->vm_manager.pasids, irqflags);
+ vm = xa_load(&adev->vm_manager.pasids, pasid);
+ if (vm && vm->root.bo != root)
+ vm = NULL;
+ xa_unlock_irqrestore(&adev->vm_manager.pasids, irqflags);
+ if (!vm)
+ goto error_unlock;
+
+ flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
+ AMDGPU_PTE_SYSTEM;
+
+ if (is_compute_context) {
+ /* Intentionally setting invalid PTE flag
+ * combination to force a no-retry-fault
+ */
+ flags = AMDGPU_VM_NORETRY_FLAGS;
+ value = 0;
+ } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
+ /* Redirect the access to the dummy page */
+ value = adev->dummy_page_addr;
+ flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE |
+ AMDGPU_PTE_WRITEABLE;
- amdgpu_sync_free(&id->active);
- dma_fence_put(id->flushed_updates);
- dma_fence_put(id->last_flush);
+ } else {
+ /* Let the hw retry silently on the PTE */
+ value = 0;
+ }
+
+ r = dma_resv_reserve_fences(root->tbo.base.resv, 1);
+ if (r) {
+ pr_debug("failed %d to reserve fence slot\n", r);
+ goto error_unlock;
+ }
+
+ r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
+ NULL, addr, addr, flags, value, 0, NULL, NULL, NULL);
+ if (r)
+ goto error_unlock;
+
+ r = amdgpu_vm_update_pdes(adev, vm, true);
+
+error_unlock:
+ amdgpu_bo_unreserve(root);
+ if (r < 0)
+ dev_err(adev->dev, "Can't handle page fault (%d)\n", r);
+
+error_unref:
+ amdgpu_bo_unref(&root);
+
+ return false;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+/**
+ * amdgpu_debugfs_vm_bo_info - print BO info for the VM
+ *
+ * @vm: Requested VM for printing BO info
+ * @m: debugfs file
+ *
+ * Print BO information in debugfs file for the VM
+ */
+void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
+{
+ struct amdgpu_bo_va *bo_va, *tmp;
+ u64 total_idle = 0;
+ u64 total_evicted = 0;
+ u64 total_relocated = 0;
+ u64 total_moved = 0;
+ u64 total_invalidated = 0;
+ u64 total_done = 0;
+ unsigned int total_idle_objs = 0;
+ unsigned int total_evicted_objs = 0;
+ unsigned int total_relocated_objs = 0;
+ unsigned int total_moved_objs = 0;
+ unsigned int total_invalidated_objs = 0;
+ unsigned int total_done_objs = 0;
+ unsigned int id = 0;
+
+ amdgpu_vm_assert_locked(vm);
+
+ spin_lock(&vm->status_lock);
+ seq_puts(m, "\tIdle BOs:\n");
+ list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ total_idle += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
+ }
+ total_idle_objs = id;
+ id = 0;
+
+ seq_puts(m, "\tEvicted BOs:\n");
+ list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ total_evicted += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
+ }
+ total_evicted_objs = id;
+ id = 0;
+
+ seq_puts(m, "\tRelocated BOs:\n");
+ list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ total_relocated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
+ }
+ total_relocated_objs = id;
+ id = 0;
+
+ seq_puts(m, "\tMoved BOs:\n");
+ list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ total_moved += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
+ }
+ total_moved_objs = id;
+ id = 0;
+
+ seq_puts(m, "\tInvalidated BOs:\n");
+ list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
+ }
+ total_invalidated_objs = id;
+ id = 0;
+
+ seq_puts(m, "\tDone BOs:\n");
+ list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
+ }
+ spin_unlock(&vm->status_lock);
+ total_done_objs = id;
+
+ seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
+ total_idle_objs);
+ seq_printf(m, "\tTotal evicted size: %12lld\tobjs:\t%d\n", total_evicted,
+ total_evicted_objs);
+ seq_printf(m, "\tTotal relocated size: %12lld\tobjs:\t%d\n", total_relocated,
+ total_relocated_objs);
+ seq_printf(m, "\tTotal moved size: %12lld\tobjs:\t%d\n", total_moved,
+ total_moved_objs);
+ seq_printf(m, "\tTotal invalidated size: %12lld\tobjs:\t%d\n", total_invalidated,
+ total_invalidated_objs);
+ seq_printf(m, "\tTotal done size: %12lld\tobjs:\t%d\n", total_done,
+ total_done_objs);
+}
+#endif
+
+/**
+ * amdgpu_vm_update_fault_cache - update cached fault info.
+ * @adev: amdgpu device pointer
+ * @pasid: PASID of the VM
+ * @addr: Address of the fault
+ * @status: GPUVM fault status register
+ * @vmhub: which vmhub got the fault
+ *
+ * Cache the fault info for later use by userspace in debugging.
+ */
+void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
+ unsigned int pasid,
+ uint64_t addr,
+ uint32_t status,
+ unsigned int vmhub)
+{
+ struct amdgpu_vm *vm;
+ unsigned long flags;
+
+ xa_lock_irqsave(&adev->vm_manager.pasids, flags);
+
+ vm = xa_load(&adev->vm_manager.pasids, pasid);
+ /* Don't update the fault cache if status is 0. In the multiple
+ * fault case, subsequent faults will return a 0 status which is
+ * useless for userspace and replaces the useful fault status, so
+ * only update if status is non-0.
+ */
+ if (vm && status) {
+ vm->fault_info.addr = addr;
+ vm->fault_info.status = status;
+ /*
+ * Update the fault information globally for later usage
+ * when vm could be stale or freed.
+ */
+ adev->vm_manager.fault_info.addr = addr;
+ adev->vm_manager.fault_info.vmhub = vmhub;
+ adev->vm_manager.fault_info.status = status;
+
+ if (AMDGPU_IS_GFXHUB(vmhub)) {
+ vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
+ vm->fault_info.vmhub |=
+ (vmhub - AMDGPU_GFXHUB_START) << AMDGPU_VMHUB_IDX_SHIFT;
+ } else if (AMDGPU_IS_MMHUB0(vmhub)) {
+ vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
+ vm->fault_info.vmhub |=
+ (vmhub - AMDGPU_MMHUB0_START) << AMDGPU_VMHUB_IDX_SHIFT;
+ } else if (AMDGPU_IS_MMHUB1(vmhub)) {
+ vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
+ vm->fault_info.vmhub |=
+ (vmhub - AMDGPU_MMHUB1_START) << AMDGPU_VMHUB_IDX_SHIFT;
+ } else {
+ WARN_ONCE(1, "Invalid vmhub %u\n", vmhub);
}
}
+ xa_unlock_irqrestore(&adev->vm_manager.pasids, flags);
+}
+
+/**
+ * amdgpu_vm_is_bo_always_valid - check if the BO is VM always valid
+ *
+ * @vm: VM to test against.
+ * @bo: BO to be tested.
+ *
+ * Returns true if the BO shares the dma_resv object with the root PD and is
+ * always guaranteed to be valid inside the VM.
+ */
+bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
+{
+ return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
+}
+
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info)
+{
+ dev_err(adev->dev,
+ " Process %s pid %d thread %s pid %d\n",
+ task_info->process_name, task_info->tgid,
+ task_info->task.comm, task_info->task.pid);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index d97e28b4bdc4..cf0ec94e8a07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -24,39 +24,43 @@
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__
+#include <linux/idr.h>
+#include <linux/kfifo.h>
#include <linux/rbtree.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/drm_file.h>
+#include <drm/ttm/ttm_bo.h>
+#include <linux/sched/mm.h>
-#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
+#include "amdgpu_ids.h"
+#include "amdgpu_ttm.h"
+
+struct drm_exec;
struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;
+struct amdgpu_bo_vm;
/*
* GPUVM handling
*/
-/* maximum number of VMIDs */
-#define AMDGPU_NUM_VM 16
-
/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
-/* PTBs (Page Table Blocks) need to be aligned to 32K */
-#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
-
-/* LOG2 number of continuous pages for the fragment field */
-#define AMDGPU_LOG2_PAGES_PER_FRAG 4
-
#define AMDGPU_PTE_VALID (1ULL << 0)
#define AMDGPU_PTE_SYSTEM (1ULL << 1)
#define AMDGPU_PTE_SNOOPED (1ULL << 2)
+/* RV+ */
+#define AMDGPU_PTE_TMZ (1ULL << 3)
+
/* VI only */
#define AMDGPU_PTE_EXECUTABLE (1ULL << 4)
@@ -68,95 +72,386 @@ struct amdgpu_bo_list_entry;
/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT (1ULL << 51)
-/* VEGA10 only */
-#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
-#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
-
-/* How to programm VM fault handling */
+/* PDE is handled as PTE for VEGA10 */
+#define AMDGPU_PDE_PTE (1ULL << 54)
+
+#define AMDGPU_PTE_LOG (1ULL << 55)
+
+/* PTE is handled as PDE for VEGA10 (Translate Further) */
+#define AMDGPU_PTE_TF (1ULL << 56)
+
+/* MALL noalloc for sienna_cichlid, reserved for older ASICs */
+#define AMDGPU_PTE_NOALLOC (1ULL << 58)
+
+/* PDE Block Fragment Size for VEGA10 */
+#define AMDGPU_PDE_BFS(a) ((uint64_t)a << 59)
+
+/* Flag combination to set no-retry with TF disabled */
+#define AMDGPU_VM_NORETRY_FLAGS (AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | \
+ AMDGPU_PTE_TF)
+
+/* Flag combination to set no-retry with TF enabled */
+#define AMDGPU_VM_NORETRY_FLAGS_TF (AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | \
+ AMDGPU_PTE_PRT)
+/* For GFX9 */
+#define AMDGPU_PTE_MTYPE_VG10_SHIFT(mtype) ((uint64_t)(mtype) << 57)
+#define AMDGPU_PTE_MTYPE_VG10_MASK AMDGPU_PTE_MTYPE_VG10_SHIFT(3ULL)
+#define AMDGPU_PTE_MTYPE_VG10(flags, mtype) \
+ (((uint64_t)(flags) & (~AMDGPU_PTE_MTYPE_VG10_MASK)) | \
+ AMDGPU_PTE_MTYPE_VG10_SHIFT(mtype))
+
+#define AMDGPU_MTYPE_NC 0
+#define AMDGPU_MTYPE_CC 2
+
+#define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \
+ | AMDGPU_PTE_SNOOPED \
+ | AMDGPU_PTE_EXECUTABLE \
+ | AMDGPU_PTE_READABLE \
+ | AMDGPU_PTE_WRITEABLE \
+ | AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))
+
+/* gfx10 */
+#define AMDGPU_PTE_MTYPE_NV10_SHIFT(mtype) ((uint64_t)(mtype) << 48)
+#define AMDGPU_PTE_MTYPE_NV10_MASK AMDGPU_PTE_MTYPE_NV10_SHIFT(7ULL)
+#define AMDGPU_PTE_MTYPE_NV10(flags, mtype) \
+ (((uint64_t)(flags) & (~AMDGPU_PTE_MTYPE_NV10_MASK)) | \
+ AMDGPU_PTE_MTYPE_NV10_SHIFT(mtype))
+
+/* gfx12 */
+#define AMDGPU_PTE_PRT_GFX12 (1ULL << 56)
+#define AMDGPU_PTE_PRT_FLAG(adev) \
+ ((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? AMDGPU_PTE_PRT_GFX12 : AMDGPU_PTE_PRT)
+
+#define AMDGPU_PTE_MTYPE_GFX12_SHIFT(mtype) ((uint64_t)(mtype) << 54)
+#define AMDGPU_PTE_MTYPE_GFX12_MASK AMDGPU_PTE_MTYPE_GFX12_SHIFT(3ULL)
+#define AMDGPU_PTE_MTYPE_GFX12(flags, mtype) \
+ (((uint64_t)(flags) & (~AMDGPU_PTE_MTYPE_GFX12_MASK)) | \
+ AMDGPU_PTE_MTYPE_GFX12_SHIFT(mtype))
+
+#define AMDGPU_PTE_DCC (1ULL << 58)
+#define AMDGPU_PTE_IS_PTE (1ULL << 63)
+
+/* PDE Block Fragment Size for gfx v12 */
+#define AMDGPU_PDE_BFS_GFX12(a) ((uint64_t)((a) & 0x1fULL) << 58)
+#define AMDGPU_PDE_BFS_FLAG(adev, a) \
+ ((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? AMDGPU_PDE_BFS_GFX12(a) : AMDGPU_PDE_BFS(a))
+/* PDE is handled as PTE for gfx v12 */
+#define AMDGPU_PDE_PTE_GFX12 (1ULL << 63)
+#define AMDGPU_PDE_PTE_FLAG(adev) \
+ ((amdgpu_ip_version((adev), GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) ? AMDGPU_PDE_PTE_GFX12 : AMDGPU_PDE_PTE)
+
+/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
-/* max number of VMHUB */
-#define AMDGPU_MAX_VMHUBS 2
-#define AMDGPU_GFXHUB 0
-#define AMDGPU_MMHUB 1
+/* How much VRAM is reserved for page tables */
+#define AMDGPU_VM_RESERVED_VRAM (8ULL << 20)
+
+/*
+ * max number of VMHUB
+ * layout: max 8 GFXHUB + 4 MMHUB0 + 1 MMHUB1
+ */
+#define AMDGPU_MAX_VMHUBS 13
+#define AMDGPU_GFXHUB_START 0
+#define AMDGPU_MMHUB0_START 8
+#define AMDGPU_MMHUB1_START 12
+#define AMDGPU_GFXHUB(x) (AMDGPU_GFXHUB_START + (x))
+#define AMDGPU_MMHUB0(x) (AMDGPU_MMHUB0_START + (x))
+#define AMDGPU_MMHUB1(x) (AMDGPU_MMHUB1_START + (x))
+
+#define AMDGPU_IS_GFXHUB(x) ((x) >= AMDGPU_GFXHUB_START && (x) < AMDGPU_MMHUB0_START)
+#define AMDGPU_IS_MMHUB0(x) ((x) >= AMDGPU_MMHUB0_START && (x) < AMDGPU_MMHUB1_START)
+#define AMDGPU_IS_MMHUB1(x) ((x) >= AMDGPU_MMHUB1_START && (x) < AMDGPU_MAX_VMHUBS)
+
+/* Reserve space at top/bottom of address space for kernel use */
+#define AMDGPU_VA_RESERVED_CSA_SIZE (2ULL << 20)
+#define AMDGPU_VA_RESERVED_CSA_START(adev) (((adev)->vm_manager.max_pfn \
+ << AMDGPU_GPU_PAGE_SHIFT) \
+ - AMDGPU_VA_RESERVED_CSA_SIZE)
+#define AMDGPU_VA_RESERVED_SEQ64_SIZE (2ULL << 20)
+#define AMDGPU_VA_RESERVED_SEQ64_START(adev) (AMDGPU_VA_RESERVED_CSA_START(adev) \
+ - AMDGPU_VA_RESERVED_SEQ64_SIZE)
+#define AMDGPU_VA_RESERVED_TRAP_SIZE (2ULL << 12)
+#define AMDGPU_VA_RESERVED_TRAP_START(adev) (AMDGPU_VA_RESERVED_SEQ64_START(adev) \
+ - AMDGPU_VA_RESERVED_TRAP_SIZE)
+#define AMDGPU_VA_RESERVED_BOTTOM (1ULL << 16)
+#define AMDGPU_VA_RESERVED_TOP (AMDGPU_VA_RESERVED_TRAP_SIZE + \
+ AMDGPU_VA_RESERVED_SEQ64_SIZE + \
+ AMDGPU_VA_RESERVED_CSA_SIZE)
+
+/* See vm_update_mode */
+#define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0)
+#define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1)
+
+/* VMPT level enumeration; the hierarchy is:
+ * PDB2->PDB1->PDB0->PTB
+ */
+enum amdgpu_vm_level {
+ AMDGPU_VM_PDB2,
+ AMDGPU_VM_PDB1,
+ AMDGPU_VM_PDB0,
+ AMDGPU_VM_PTB
+};
+
+/* base structure for tracking BO usage in a VM */
+struct amdgpu_vm_bo_base {
+ /* constant after initialization */
+ struct amdgpu_vm *vm;
+ struct amdgpu_bo *bo;
-/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
+ /* protected by bo being reserved */
+ struct amdgpu_vm_bo_base *next;
-struct amdgpu_vm_pt {
- struct amdgpu_bo *bo;
- uint64_t addr;
+ /* protected by vm status_lock */
+ struct list_head vm_status;
- /* array of page tables, one for each directory entry */
- struct amdgpu_vm_pt *entries;
- unsigned last_entry_used;
+ /* if the bo is counted as shared in mem stats
+ * protected by vm status_lock */
+ bool shared;
+
+ /* protected by the BO being reserved */
+ bool moved;
+};
+
+/* provided by hw blocks that can write ptes, e.g., sdma */
+struct amdgpu_vm_pte_funcs {
+ /* number of dw to reserve per operation */
+ unsigned copy_pte_num_dw;
+
+ /* copy pte entries from GART */
+ void (*copy_pte)(struct amdgpu_ib *ib,
+ uint64_t pe, uint64_t src,
+ unsigned count);
+
+ /* write pte one entry at a time with addr mapping */
+ void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
+ uint64_t value, unsigned count,
+ uint32_t incr);
+ /* for linear pte/pde updates without addr mapping */
+ void (*set_pte_pde)(struct amdgpu_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint64_t flags);
+};
+
+struct amdgpu_task_info {
+ struct drm_wedge_task_info task;
+ char process_name[TASK_COMM_LEN];
+ pid_t tgid;
+ struct kref refcount;
+};
+
+/**
+ * struct amdgpu_vm_update_params
+ *
+ * Encapsulate some VM table update parameters to reduce
+ * the number of function parameters
+ *
+ */
+struct amdgpu_vm_update_params {
+
+ /**
+ * @adev: amdgpu device we do this update for
+ */
+ struct amdgpu_device *adev;
+
+ /**
+ * @vm: optional amdgpu_vm we do this update for
+ */
+ struct amdgpu_vm *vm;
+
+ /**
+ * @immediate: if changes should be made immediately
+ */
+ bool immediate;
+
+ /**
+ * @unlocked: true if the root BO is not locked
+ */
+ bool unlocked;
+
+ /**
+ * @pages_addr:
+ *
+ * DMA addresses to use for mapping
+ */
+ dma_addr_t *pages_addr;
+
+ /**
+ * @job: job to be used for hw submission
+ */
+ struct amdgpu_job *job;
+
+ /**
+ * @num_dw_left: number of dw left for the IB
+ */
+ unsigned int num_dw_left;
+
+ /**
+ * @needs_flush: true whenever we need to invalidate the TLB
+ */
+ bool needs_flush;
+
+ /**
+ * @allow_override: true for memory that is not uncached: allows MTYPE
+ * to be overridden for NUMA local memory.
+ */
+ bool allow_override;
+
+ /**
+ * @tlb_flush_waitlist: temporary storage for BOs until tlb_flush
+ */
+ struct list_head tlb_flush_waitlist;
+};
+
+struct amdgpu_vm_update_funcs {
+ int (*map_table)(struct amdgpu_bo_vm *bo);
+ int (*prepare)(struct amdgpu_vm_update_params *p,
+ struct amdgpu_sync *sync, u64 k_job_id);
+ int (*update)(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
+ unsigned count, uint32_t incr, uint64_t flags);
+ int (*commit)(struct amdgpu_vm_update_params *p,
+ struct dma_fence **fence);
+};
+
+struct amdgpu_vm_fault_info {
+ /* fault address */
+ uint64_t addr;
+ /* fault status register */
+ uint32_t status;
+ /* which vmhub? gfxhub, mmhub, etc. */
+ unsigned int vmhub;
+};
+
+struct amdgpu_mem_stats {
+ struct drm_memory_stats drm;
+
+ /* buffers that requested this placement but are currently evicted */
+ uint64_t evicted;
};
struct amdgpu_vm {
/* tree of virtual addresses mapped */
- struct rb_root va;
+ struct rb_root_cached va;
+
+ /* Lock to prevent eviction while we are updating page tables
+ * use amdgpu_vm_eviction_lock/unlock(vm)
+ */
+ struct mutex eviction_lock;
+ bool evicting;
+ unsigned int saved_flags;
- /* protecting invalidated */
+ /* Lock to protect vm_bo add/del/move on all lists of vm */
spinlock_t status_lock;
- /* BOs moved, but not yet updated in the PT */
+ /* Memory statistics for this vm, protected by status_lock */
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
+
+ /*
+ * The following lists contain amdgpu_vm_bo_base objects for either
+ * PDs, PTs or per VM BOs. The state transits are:
+ *
+ * evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle
+ */
+
+ /* Per-VM and PT BOs who needs a validation */
+ struct list_head evicted;
+
+ /* PT BOs which relocated and their parent need an update */
+ struct list_head relocated;
+
+ /* per VM BOs moved, but not yet updated in the PT */
+ struct list_head moved;
+
+ /* All BOs of this VM not currently in the state machine */
+ struct list_head idle;
+
+ /*
+ * The following lists contain amdgpu_vm_bo_base objects for BOs which
+ * have their own dma_resv object and not depend on the root PD. Their
+ * state transits are:
+ *
+ * evicted_user or invalidated -> done
+ */
+
+ /* BOs for user mode queues that need a validation */
+ struct list_head evicted_user;
+
+ /* regular invalidated BOs, but not yet updated in the PT */
struct list_head invalidated;
- /* BOs cleared in the PT because of a move */
- struct list_head cleared;
+ /* BOs which were invalidated and have already been updated in the PTs */
+ struct list_head done;
- /* BO mappings freed, but not yet updated in the PT */
+ /*
+ * This list contains amdgpu_bo_va_mapping objects which have been freed
+ * but not updated in the PTs
+ */
struct list_head freed;
/* contains the page directory */
- struct amdgpu_vm_pt root;
- struct dma_fence *last_dir_update;
- uint64_t last_eviction_counter;
+ struct amdgpu_vm_bo_base root;
+ struct dma_fence *last_update;
- /* protecting freed */
- spinlock_t freed_lock;
+ /* Scheduler entities for page table updates */
+ struct drm_sched_entity immediate;
+ struct drm_sched_entity delayed;
- /* Scheduler entity for page table updates */
- struct amd_sched_entity entity;
+ /* Last finished delayed update */
+ atomic64_t tlb_seq;
+ struct dma_fence *last_tlb_flush;
+ atomic64_t kfd_last_flushed_seq;
+ uint64_t tlb_fence_context;
- /* client id */
- u64 client_id;
- /* each VM will map on CSA */
- struct amdgpu_bo_va *csa_bo_va;
-};
+ /* How many times we had to re-generate the page tables */
+ uint64_t generation;
-struct amdgpu_vm_id {
- struct list_head list;
- struct amdgpu_sync active;
- struct dma_fence *last_flush;
- atomic64_t owner;
+ /* Last unlocked submission to the scheduler entities */
+ struct dma_fence *last_unlocked;
- uint64_t pd_gpu_addr;
- /* last flushed PD/PT update */
- struct dma_fence *flushed_updates;
+ unsigned int pasid;
+ struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
- uint32_t current_gpu_reset_count;
+ /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
+ bool use_cpu_for_update;
- uint32_t gds_base;
- uint32_t gds_size;
- uint32_t gws_base;
- uint32_t gws_size;
- uint32_t oa_base;
- uint32_t oa_size;
-};
+ /* Functions to use for VM table updates */
+ const struct amdgpu_vm_update_funcs *update_funcs;
+
+ /* Up to 128 pending retry page faults */
+ DECLARE_KFIFO(faults, u64, 128);
+
+ /* Points to the KFD process VM info */
+ struct amdkfd_process_info *process_info;
+
+ /* List node in amdkfd_process_info.vm_list_head */
+ struct list_head vm_list_node;
+
+ /* Valid while the PD is reserved or fenced */
+ uint64_t pd_phys_addr;
+
+ /* Some basic info about the task */
+ struct amdgpu_task_info *task_info;
+
+ /* Store positions of group of BOs */
+ struct ttm_lru_bulk_move lru_bulk_move;
+ /* Flag to indicate if VM is used for compute */
+ bool is_compute_context;
+
+ /* Memory partition number, -1 means any partition */
+ int8_t mem_id;
-struct amdgpu_vm_id_manager {
- struct mutex lock;
- unsigned num_ids;
- struct list_head ids_lru;
- struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
+ /* cached fault info */
+ struct amdgpu_vm_fault_info fault_info;
};
struct amdgpu_vm_manager {
/* Handling of VMIDs */
- struct amdgpu_vm_id_manager id_mgr[AMDGPU_MAX_VMHUBS];
+ struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS];
+ unsigned int first_kfd_vmid;
+ bool concurrent_flush;
/* Handling of VM fences */
u64 fence_context;
@@ -164,58 +459,94 @@ struct amdgpu_vm_manager {
uint64_t max_pfn;
uint32_t num_level;
- uint64_t vm_size;
uint32_t block_size;
+ uint32_t fragment_size;
+ enum amdgpu_vm_level root_level;
/* vram base address for page table entry */
u64 vram_base_offset;
- /* is vm enabled? */
- bool enabled;
/* vm pte handling */
- const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
- struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
- unsigned vm_pte_num_rings;
- atomic_t vm_pte_next_ring;
- /* client id counter */
- atomic64_t client_counter;
+ const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
+ struct drm_gpu_scheduler *vm_pte_scheds[AMDGPU_MAX_RINGS];
+ unsigned vm_pte_num_scheds;
+ struct amdgpu_ring *page_fault;
/* partial resident texture handling */
spinlock_t prt_lock;
atomic_t num_prt_users;
+
+ /* controls how VM page tables are updated for Graphics and Compute.
+ * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
+ * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
+ */
+ int vm_update_mode;
+
+ /* PASID to VM mapping, will be used in interrupt context to
+ * look up VM of a page fault
+ */
+ struct xarray pasids;
+ /* Global registration of recent page fault information */
+ struct amdgpu_vm_fault_info fault_info;
};
+struct amdgpu_bo_va_mapping;
+
+#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
+#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
+#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
+
+extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
+extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
+
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+
+long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id, uint32_t pasid);
+int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
- struct list_head *validated,
- struct amdgpu_bo_list_entry *entry);
-int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int (*callback)(void *p, struct amdgpu_bo *bo),
- void *param);
-void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
-int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- uint64_t saddr, uint64_t size);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
- struct amdgpu_sync *sync, struct dma_fence *fence,
- struct amdgpu_job *job);
-int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
-void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
- unsigned vmid);
-int amdgpu_vm_update_directories(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
+int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences);
+int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
+ unsigned int num_fences);
+bool amdgpu_vm_ready(struct amdgpu_vm *vm);
+uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct ww_acquire_ctx *ticket,
+ int (*callback)(void *p, struct amdgpu_bo *bo),
+ void *param);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
+int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_sync *sync);
+int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct ww_acquire_ctx *ticket);
+int amdgpu_vm_flush_compute_tlb(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ uint32_t flush_type,
+ uint32_t xcc_mask);
+void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+ struct amdgpu_vm *vm, struct amdgpu_bo *bo);
+int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ bool immediate, bool unlocked, bool flush_tlb,
+ bool allow_override, struct amdgpu_sync *sync,
+ uint64_t start, uint64_t last, uint64_t flags,
+ uint64_t offset, uint64_t vram_base,
+ struct ttm_resource *res, dma_addr_t *pages_addr,
+ struct dma_fence **fence);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo);
+bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
+void amdgpu_vm_bo_invalidate(struct amdgpu_bo *bo, bool evicted);
+void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
+ struct ttm_resource *new_res, int sign);
+void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo);
+void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
+ bool evicted);
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
@@ -224,19 +555,141 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
- uint64_t size, uint64_t flags);
+ uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
uint64_t saddr, uint64_t size);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+ uint64_t addr);
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
+void amdgpu_vm_bo_del(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size);
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
+ uint32_t fragment_size_default, unsigned max_level,
+ unsigned max_bits);
+int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
+bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
+ struct amdgpu_job *job);
+void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
+
+struct amdgpu_task_info *
+amdgpu_vm_get_task_info_pasid(struct amdgpu_device *adev, u32 pasid);
+
+struct amdgpu_task_info *
+amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm);
+
+void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info);
+
+bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
+ u32 vmid, u32 node_id, uint64_t addr, uint64_t ts,
+ bool write_fault);
+
+void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
+
+void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
+void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
+ struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]);
+
+int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo_vm *vmbo, bool immediate);
+int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int level, bool immediate, struct amdgpu_bo_vm **vmbo,
+ int32_t xcp_id);
+void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+
+int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
+ struct amdgpu_vm_bo_base *entry);
+int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint64_t flags);
+void amdgpu_vm_pt_free_work(struct work_struct *work);
+void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
+ struct amdgpu_vm_update_params *params);
+
+#if defined(CONFIG_DEBUG_FS)
+void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
+#endif
+
+int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+
+bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo);
+
+/**
+ * amdgpu_vm_tlb_seq - return tlb flush sequence number
+ * @vm: the amdgpu_vm structure to query
+ *
+ * Returns the tlb flush sequence number which indicates that the VM TLBs need
+ * to be invalidated whenever the sequence number changes.
+ */
+static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
+{
+ unsigned long flags;
+ spinlock_t *lock;
+
+ /*
+ * Workaround to stop racing between the fence signaling and handling
+ * the cb. The lock is static after initially setting it up, just make
+ * sure that the dma_fence structure isn't freed up.
+ */
+ rcu_read_lock();
+ lock = vm->last_tlb_flush->lock;
+ rcu_read_unlock();
+
+ spin_lock_irqsave(lock, flags);
+ spin_unlock_irqrestore(lock, flags);
+
+ return atomic64_read(&vm->tlb_seq);
+}
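+
+/*
+ * Illustrative usage (editor's note, not part of this change): callers
+ * typically cache the sequence number together with previously submitted
+ * work and trigger a flush once it has moved on, along the lines of
+ * (hypothetical caller-side variables):
+ *
+ *	if (cached_tlb_seq != amdgpu_vm_tlb_seq(vm))
+ *		needs_flush = true;
+ */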
+
+/*
+ * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
+ * happens while holding this lock anywhere to prevent deadlocks when
+ * an MMU notifier runs in reclaim-FS context.
+ */
+static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
+{
+ mutex_lock(&vm->eviction_lock);
+ vm->saved_flags = memalloc_noreclaim_save();
+}
+
+static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
+{
+ if (mutex_trylock(&vm->eviction_lock)) {
+ vm->saved_flags = memalloc_noreclaim_save();
+ return true;
+ }
+ return false;
+}
+
+static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
+{
+ memalloc_noreclaim_restore(vm->saved_flags);
+ mutex_unlock(&vm->eviction_lock);
+}
+
+void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev,
+ unsigned int pasid,
+ uint64_t addr,
+ uint32_t status,
+ unsigned int vmhub);
+void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct dma_fence **fence);
+
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info);
+
+#define amdgpu_vm_bo_va_for_each_valid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &(bo_va)->valids, list)
+#define amdgpu_vm_bo_va_for_each_invalid_mapping(bo_va, mapping) \
+ list_for_each_entry(mapping, &(bo_va)->invalids, list)
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
new file mode 100644
index 000000000000..22e2e5b47341
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_vm.h"
+#include "amdgpu_object.h"
+#include "amdgpu_trace.h"
+
+/**
+ * amdgpu_vm_cpu_map_table - make sure new PDs/PTs are kmapped
+ *
+ * @table: newly allocated or validated PD/PT
+ */
+static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
+{
+ table->bo.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ return amdgpu_bo_kmap(&table->bo, NULL);
+}
+
+/**
+ * amdgpu_vm_cpu_prepare - prepare page table update with the CPU
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @sync: sync obj with fences to wait on
+ * @k_job_id: the id for tracing/debug purposes
+ *
+ * Returns:
+ * Negative errno on failure, 0 for success.
+ */
+static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
+ struct amdgpu_sync *sync,
+ u64 k_job_id)
+{
+ if (!sync)
+ return 0;
+
+ return amdgpu_sync_wait(sync, true);
+}
+
+/**
+ * amdgpu_vm_cpu_update - helper to update page tables via CPU
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @vmbo: PD/PT to update
+ * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Write count number of PT/PD entries directly.
+ */
+static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo_vm *vmbo, uint64_t pe,
+ uint64_t addr, unsigned count, uint32_t incr,
+ uint64_t flags)
+{
+ unsigned int i;
+ uint64_t value;
+ long r;
+
+ r = dma_resv_wait_timeout(vmbo->bo.tbo.base.resv, DMA_RESV_USAGE_KERNEL,
+ true, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0)
+ return r;
+
+ pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);
+
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
+
+ for (i = 0; i < count; i++) {
+ value = p->pages_addr ?
+ amdgpu_vm_map_gart(p->pages_addr, addr) :
+ addr;
+ amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
+ i, value, flags);
+ addr += incr;
+ }
+ return 0;
+}
+
+/**
+ * amdgpu_vm_cpu_commit - commit page table update to the HW
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @fence: unused
+ *
+ * Make sure that the hardware sees the page table updates.
+ */
+static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
+ struct dma_fence **fence)
+{
+ if (p->needs_flush)
+ atomic64_inc(&p->vm->tlb_seq);
+
+ mb();
+ amdgpu_device_flush_hdp(p->adev, NULL);
+ return 0;
+}
+
+const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs = {
+ .map_table = amdgpu_vm_cpu_map_table,
+ .prepare = amdgpu_vm_cpu_prepare,
+ .update = amdgpu_vm_cpu_update,
+ .commit = amdgpu_vm_cpu_commit
+};
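For context, the VM chooses between this CPU backend and the SDMA backend declared further down; a rough sketch of that selection (the real code lives in amdgpu_vm.c and may differ in detail):

	if (vm->use_cpu_for_update)
		vm->update_funcs = &amdgpu_vm_cpu_funcs;
	else
		vm->update_funcs = &amdgpu_vm_sdma_funcs;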
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
new file mode 100644
index 000000000000..f794fb1cc06e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -0,0 +1,976 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_drv.h>
+
+#include "amdgpu.h"
+#include "amdgpu_trace.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_job.h"
+
+/*
+ * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
+ */
+struct amdgpu_vm_pt_cursor {
+ uint64_t pfn;
+ struct amdgpu_vm_bo_base *parent;
+ struct amdgpu_vm_bo_base *entry;
+ unsigned int level;
+};
+
+/**
+ * amdgpu_vm_pt_level_shift - return the addr shift for each level
+ *
+ * @adev: amdgpu_device pointer
+ * @level: VMPT level
+ *
+ * Returns:
+ * The number of bits the pfn needs to be right shifted for a level.
+ */
+static unsigned int amdgpu_vm_pt_level_shift(struct amdgpu_device *adev,
+ unsigned int level)
+{
+ switch (level) {
+ case AMDGPU_VM_PDB2:
+ case AMDGPU_VM_PDB1:
+ case AMDGPU_VM_PDB0:
+ return 9 * (AMDGPU_VM_PDB0 - level) +
+ adev->vm_manager.block_size;
+ case AMDGPU_VM_PTB:
+ return 0;
+ default:
+ return ~0;
+ }
+}
+
+/**
+ * amdgpu_vm_pt_num_entries - return the number of entries in a PD/PT
+ *
+ * @adev: amdgpu_device pointer
+ * @level: VMPT level
+ *
+ * Returns:
+ * The number of entries in a page directory or page table.
+ */
+static unsigned int amdgpu_vm_pt_num_entries(struct amdgpu_device *adev,
+ unsigned int level)
+{
+ unsigned int shift;
+
+ shift = amdgpu_vm_pt_level_shift(adev, adev->vm_manager.root_level);
+ if (level == adev->vm_manager.root_level)
+ /* For the root directory */
+ return round_up(adev->vm_manager.max_pfn, 1ULL << shift)
+ >> shift;
+ else if (level != AMDGPU_VM_PTB)
+ /* Everything in between */
+ return 512;
+
+ /* For the page tables on the leaves */
+ return AMDGPU_VM_PTE_COUNT(adev);
+}
+
+/**
+ * amdgpu_vm_pt_entries_mask - the mask to get the entry number of a PD/PT
+ *
+ * @adev: amdgpu_device pointer
+ * @level: VMPT level
+ *
+ * Returns:
+ * The mask to extract the entry number of a PD/PT from an address.
+ */
+static uint32_t amdgpu_vm_pt_entries_mask(struct amdgpu_device *adev,
+ unsigned int level)
+{
+ if (level <= adev->vm_manager.root_level)
+ return 0xffffffff;
+ else if (level != AMDGPU_VM_PTB)
+ return 0x1ff;
+ else
+ return AMDGPU_VM_PTE_COUNT(adev) - 1;
+}
+
+/**
+ * amdgpu_vm_pt_size - returns the size of the page table in bytes
+ *
+ * @adev: amdgpu_device pointer
+ * @level: VMPT level
+ *
+ * Returns:
+ * The size of the BO for a page directory or page table in bytes.
+ */
+static unsigned int amdgpu_vm_pt_size(struct amdgpu_device *adev,
+ unsigned int level)
+{
+ return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_pt_num_entries(adev, level) * 8);
+}
+
+/**
+ * amdgpu_vm_pt_parent - get the parent page directory
+ *
+ * @pt: child page table
+ *
+ * Helper to get the parent entry for the child page table. NULL if we are at
+ * the root page directory.
+ */
+static struct amdgpu_vm_bo_base *
+amdgpu_vm_pt_parent(struct amdgpu_vm_bo_base *pt)
+{
+ struct amdgpu_bo *parent = pt->bo->parent;
+
+ if (!parent)
+ return NULL;
+
+ return parent->vm_bo;
+}
+
+/**
+ * amdgpu_vm_pt_start - start PD/PT walk
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: amdgpu_vm structure
+ * @start: start address of the walk
+ * @cursor: state to initialize
+ *
+ * Initialize a amdgpu_vm_pt_cursor to start a walk.
+ */
+static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, uint64_t start,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ cursor->pfn = start;
+ cursor->parent = NULL;
+ cursor->entry = &vm->root;
+ cursor->level = adev->vm_manager.root_level;
+}
+
+/**
+ * amdgpu_vm_pt_descendant - go to child node
+ *
+ * @adev: amdgpu_device pointer
+ * @cursor: current state
+ *
+ * Walk to the child node of the current node.
+ * Returns:
+ * True if the walk was possible, false otherwise.
+ */
+static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ unsigned int mask, shift, idx;
+
+ if ((cursor->level == AMDGPU_VM_PTB) || !cursor->entry ||
+ !cursor->entry->bo)
+ return false;
+
+ mask = amdgpu_vm_pt_entries_mask(adev, cursor->level);
+ shift = amdgpu_vm_pt_level_shift(adev, cursor->level);
+
+ ++cursor->level;
+ idx = (cursor->pfn >> shift) & mask;
+ cursor->parent = cursor->entry;
+ cursor->entry = &to_amdgpu_bo_vm(cursor->entry->bo)->entries[idx];
+ return true;
+}
+
+/**
+ * amdgpu_vm_pt_sibling - go to sibling node
+ *
+ * @adev: amdgpu_device pointer
+ * @cursor: current state
+ *
+ * Walk to the sibling node of the current node.
+ * Returns:
+ * True if the walk was possible, false otherwise.
+ */
+static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+
+ unsigned int shift, num_entries;
+ struct amdgpu_bo_vm *parent;
+
+ /* Root doesn't have a sibling */
+ if (!cursor->parent)
+ return false;
+
+ /* Go to our parents and see if we got a sibling */
+ shift = amdgpu_vm_pt_level_shift(adev, cursor->level - 1);
+ num_entries = amdgpu_vm_pt_num_entries(adev, cursor->level - 1);
+ parent = to_amdgpu_bo_vm(cursor->parent->bo);
+
+ if (cursor->entry == &parent->entries[num_entries - 1])
+ return false;
+
+ cursor->pfn += 1ULL << shift;
+ cursor->pfn &= ~((1ULL << shift) - 1);
+ ++cursor->entry;
+ return true;
+}
+
+/**
+ * amdgpu_vm_pt_ancestor - go to parent node
+ *
+ * @cursor: current state
+ *
+ * Walk to the parent node of the current node.
+ * Returns:
+ * True if the walk was possible, false otherwise.
+ */
+static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
+{
+ if (!cursor->parent)
+ return false;
+
+ --cursor->level;
+ cursor->entry = cursor->parent;
+ cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
+ return true;
+}
+
+/**
+ * amdgpu_vm_pt_next - get the next PD/PT in the hierarchy
+ *
+ * @adev: amdgpu_device pointer
+ * @cursor: current state
+ *
+ * Walk the PD/PT tree to the next node.
+ */
+static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ /* First try a newborn child */
+ if (amdgpu_vm_pt_descendant(adev, cursor))
+ return;
+
+ /* If that didn't work, try to find a sibling */
+ while (!amdgpu_vm_pt_sibling(adev, cursor)) {
+ /* No sibling, go to our parents and grandparents */
+ if (!amdgpu_vm_pt_ancestor(cursor)) {
+ cursor->pfn = ~0ll;
+ return;
+ }
+ }
+}
+
+/**
+ * amdgpu_vm_pt_first_dfs - start a depth-first search
+ *
+ * @adev: amdgpu_device structure
+ * @vm: amdgpu_vm structure
+ * @start: optional cursor to start with
+ * @cursor: state to initialize
+ *
+ * Starts a depth-first traversal of the PD/PT tree.
+ */
+static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt_cursor *start,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ if (start)
+ *cursor = *start;
+ else
+ amdgpu_vm_pt_start(adev, vm, 0, cursor);
+
+ while (amdgpu_vm_pt_descendant(adev, cursor))
+ ;
+}
+
+/**
+ * amdgpu_vm_pt_continue_dfs - check if the depth-first search should continue
+ *
+ * @start: starting point for the search
+ * @entry: current entry
+ *
+ * Returns:
+ * True when the search should continue, false otherwise.
+ */
+static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
+ struct amdgpu_vm_bo_base *entry)
+{
+ return entry && (!start || entry != start->entry);
+}
+
+/**
+ * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
+ *
+ * @adev: amdgpu_device structure
+ * @cursor: current state
+ *
+ * Move the cursor to the next node in a depth-first search.
+ */
+static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ if (!cursor->entry)
+ return;
+
+ if (!cursor->parent)
+ cursor->entry = NULL;
+ else if (amdgpu_vm_pt_sibling(adev, cursor))
+ while (amdgpu_vm_pt_descendant(adev, cursor))
+ ;
+ else
+ amdgpu_vm_pt_ancestor(cursor);
+}
+
+/*
+ * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
+ */
+#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
+ for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
+ (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
+ amdgpu_vm_pt_continue_dfs((start), (entry)); \
+ (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
+
+/**
+ * amdgpu_vm_pt_clear - initially clear the PDs/PTs
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: VM to clear BO from
+ * @vmbo: BO to clear
+ * @immediate: use an immediate update
+ *
+ * Root PD needs to be reserved when calling this.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo_vm *vmbo, bool immediate)
+{
+ unsigned int level = adev->vm_manager.root_level;
+ struct ttm_operation_ctx ctx = { true, false };
+ struct amdgpu_vm_update_params params;
+ struct amdgpu_bo *ancestor = &vmbo->bo;
+ unsigned int entries;
+ struct amdgpu_bo *bo = &vmbo->bo;
+ uint64_t value = 0, flags = 0;
+ uint64_t addr;
+ int r, idx;
+
+ /* Figure out our place in the hierarchy */
+ if (ancestor->parent) {
+ ++level;
+ while (ancestor->parent->parent) {
+ ++level;
+ ancestor = ancestor->parent;
+ }
+ }
+
+ entries = amdgpu_bo_size(bo) / 8;
+
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ return r;
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return -ENODEV;
+
+ r = vm->update_funcs->map_table(vmbo);
+ if (r)
+ goto exit;
+
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.vm = vm;
+ params.immediate = immediate;
+
+ r = vm->update_funcs->prepare(&params, NULL,
+ AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR);
+ if (r)
+ goto exit;
+
+ addr = 0;
+
+ if (adev->asic_type >= CHIP_VEGA10) {
+ if (level != AMDGPU_VM_PTB) {
+ /* Handle leaf PDEs as PTEs */
+ flags |= AMDGPU_PDE_PTE_FLAG(adev);
+ amdgpu_gmc_get_vm_pde(adev, level,
+ &value, &flags);
+ } else {
+ /* Workaround for fault priority problem on GMC9 */
+ flags = AMDGPU_PTE_EXECUTABLE;
+ }
+ }
+
+ r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
+ value, flags);
+ if (r)
+ goto exit;
+
+ r = vm->update_funcs->commit(&params, NULL);
+exit:
+ drm_dev_exit(idx);
+ return r;
+}
+
+/**
+ * amdgpu_vm_pt_create - create bo for PD/PT
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requesting vm
+ * @level: the page table level
+ * @immediate: use an immediate update
+ * @vmbo: pointer to the buffer object pointer
+ * @xcp_id: GPU partition id
+ */
+int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int level, bool immediate, struct amdgpu_bo_vm **vmbo,
+ int32_t xcp_id)
+{
+ struct amdgpu_bo_param bp;
+ unsigned int num_entries;
+
+ memset(&bp, 0, sizeof(bp));
+
+ bp.size = amdgpu_vm_pt_size(adev, level);
+ bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
+
+ if (!adev->gmc.is_app_apu)
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ else
+ bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+
+ bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
+ bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+
+ if (level < AMDGPU_VM_PTB)
+ num_entries = amdgpu_vm_pt_num_entries(adev, level);
+ else
+ num_entries = 0;
+
+ bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);
+
+ if (vm->use_cpu_for_update)
+ bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
+ bp.type = ttm_bo_type_kernel;
+ bp.no_wait_gpu = immediate;
+ bp.xcp_id_plus1 = xcp_id + 1;
+
+ if (vm->root.bo)
+ bp.resv = vm->root.bo->tbo.base.resv;
+
+ return amdgpu_bo_create_vm(adev, &bp, vmbo);
+}
+
+/**
+ * amdgpu_vm_pt_alloc - Allocate a specific page table
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: VM to allocate page tables for
+ * @cursor: Which page table to allocate
+ * @immediate: use an immediate update
+ *
+ * Make sure a specific page table or directory is allocated.
+ *
+ * Returns:
+ * 1 if page table needed to be allocated, 0 if page table was already
+ * allocated, negative errno if an error occurred.
+ */
+static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt_cursor *cursor,
+ bool immediate)
+{
+ struct amdgpu_vm_bo_base *entry = cursor->entry;
+ struct amdgpu_bo *pt_bo;
+ struct amdgpu_bo_vm *pt;
+ int r;
+
+ if (entry->bo)
+ return 0;
+
+ amdgpu_vm_eviction_unlock(vm);
+ r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
+ vm->root.bo->xcp_id);
+ amdgpu_vm_eviction_lock(vm);
+ if (r)
+ return r;
+
+ /* Keep a reference to the parent directory so the page tables
+ * are not freed up in the wrong order.
+ */
+ pt_bo = &pt->bo;
+ pt_bo->parent = amdgpu_bo_ref(cursor->parent->bo);
+ amdgpu_vm_bo_base_init(entry, vm, pt_bo);
+ r = amdgpu_vm_pt_clear(adev, vm, pt, immediate);
+ if (r)
+ goto error_free_pt;
+
+ return 0;
+
+error_free_pt:
+ amdgpu_bo_unref(&pt_bo);
+ return r;
+}
+
+/**
+ * amdgpu_vm_pt_free - free one PD/PT
+ *
+ * @entry: PDE to free
+ */
+static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
+{
+ if (!entry->bo)
+ return;
+
+ amdgpu_vm_update_stats(entry, entry->bo->tbo.resource, -1);
+ entry->bo->vm_bo = NULL;
+ ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
+
+ spin_lock(&entry->vm->status_lock);
+ list_del(&entry->vm_status);
+ spin_unlock(&entry->vm->status_lock);
+ amdgpu_bo_unref(&entry->bo);
+}
+
+/**
+ * amdgpu_vm_pt_free_list - free PD/PT levels
+ *
+ * @adev: amdgpu device structure
+ * @params: see amdgpu_vm_update_params definition
+ *
+ * Free the page directory objects saved in the flush list
+ */
+void amdgpu_vm_pt_free_list(struct amdgpu_device *adev,
+ struct amdgpu_vm_update_params *params)
+{
+ struct amdgpu_vm_bo_base *entry, *next;
+ bool unlocked = params->unlocked;
+
+ if (list_empty(&params->tlb_flush_waitlist))
+ return;
+
+ /*
+ * Unlocked unmaps only clear page table leaves; warn if we are asked
+ * to free a page table entry here.
+ */
+ WARN_ON(unlocked);
+
+ list_for_each_entry_safe(entry, next, &params->tlb_flush_waitlist, vm_status)
+ amdgpu_vm_pt_free(entry);
+}
+
+/**
+ * amdgpu_vm_pt_add_list - add PD/PT level to the flush list
+ *
+ * @params: parameters for the update
+ * @cursor: first PT entry to start the depth-first search from, must be non-NULL
+ *
+ * The PDs/PTs collected here are freed after the TLB flush.
+ */
+static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ struct amdgpu_vm_pt_cursor seek;
+ struct amdgpu_vm_bo_base *entry;
+
+ spin_lock(&params->vm->status_lock);
+ for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
+ if (entry && entry->bo)
+ list_move(&entry->vm_status, &params->tlb_flush_waitlist);
+ }
+
+ /* enter start node now */
+ list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
+ spin_unlock(&params->vm->status_lock);
+}
+
+/**
+ * amdgpu_vm_pt_free_root - free root PD
+ * @adev: amdgpu device structure
+ * @vm: amdgpu vm structure
+ *
+ * Free the root page directory and everything below it.
+ */
+void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+ struct amdgpu_vm_pt_cursor cursor;
+ struct amdgpu_vm_bo_base *entry;
+
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
+ if (entry)
+ amdgpu_vm_pt_free(entry);
+ }
+}
+
+/**
+ * amdgpu_vm_pde_update - update a single level in the hierarchy
+ *
+ * @params: parameters for the update
+ * @entry: entry to update
+ *
+ * Makes sure the requested entry in parent is up to date.
+ */
+int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
+ struct amdgpu_vm_bo_base *entry)
+{
+ struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
+ struct amdgpu_bo *bo, *pbo;
+ struct amdgpu_vm *vm = params->vm;
+ uint64_t pde, pt, flags;
+ unsigned int level;
+
+ if (WARN_ON(!parent))
+ return -EINVAL;
+
+ bo = parent->bo;
+ for (level = 0, pbo = bo->parent; pbo; ++level)
+ pbo = pbo->parent;
+
+ level += params->adev->vm_manager.root_level;
+ amdgpu_gmc_get_pde_for_bo(entry->bo, level, &pt, &flags);
+ pde = (entry - to_amdgpu_bo_vm(parent->bo)->entries) * 8;
+ return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
+ 1, 0, flags);
+}
+
+/**
+ * amdgpu_vm_pte_update_noretry_flags - Update PTE no-retry flags
+ *
+ * @adev: amdgpu_device pointer
+ * @flags: pointer to PTE flags
+ *
+ * Update PTE no-retry flags when TF is enabled.
+ */
+static void amdgpu_vm_pte_update_noretry_flags(struct amdgpu_device *adev,
+ uint64_t *flags)
+{
+ /*
+ * Update no-retry flags with the corresponding TF
+ * no-retry combination.
+ */
+ if ((*flags & AMDGPU_VM_NORETRY_FLAGS) == AMDGPU_VM_NORETRY_FLAGS) {
+ *flags &= ~AMDGPU_VM_NORETRY_FLAGS;
+ *flags |= adev->gmc.noretry_flags;
+ }
+}
+
+/*
+ * amdgpu_vm_pte_update_flags - figure out flags for PTE updates
+ *
+ * Make sure to set the right flags for the PTEs at the desired level.
+ */
+static void amdgpu_vm_pte_update_flags(struct amdgpu_vm_update_params *params,
+ struct amdgpu_bo_vm *pt,
+ unsigned int level,
+ uint64_t pe, uint64_t addr,
+ unsigned int count, uint32_t incr,
+ uint64_t flags)
+{
+ struct amdgpu_device *adev = params->adev;
+
+ if (level != AMDGPU_VM_PTB) {
+ flags |= AMDGPU_PDE_PTE_FLAG(params->adev);
+ amdgpu_gmc_get_vm_pde(adev, level, &addr, &flags);
+
+ } else if (adev->asic_type >= CHIP_VEGA10 &&
+ !(flags & AMDGPU_PTE_VALID) &&
+ !(flags & AMDGPU_PTE_PRT_FLAG(params->adev))) {
+
+ /* Workaround for fault priority problem on GMC9 */
+ flags |= AMDGPU_PTE_EXECUTABLE;
+ }
+
+ /*
+ * Update no-retry flags to use the no-retry flag combination
+ * with TF enabled. The AMDGPU_VM_NORETRY_FLAGS flag combination
+ * does not work when TF is enabled. So, replace them with
+ * AMDGPU_VM_NORETRY_FLAGS_TF flag combination which works for
+ * all cases.
+ */
+ if (level == AMDGPU_VM_PTB)
+ amdgpu_vm_pte_update_noretry_flags(adev, &flags);
+
+ /* APUs mapping system memory may need different MTYPEs on different
+ * NUMA nodes. Only do this for contiguous ranges that can be assumed
+ * to be on the same NUMA node.
+ */
+ if ((flags & AMDGPU_PTE_SYSTEM) && (adev->flags & AMD_IS_APU) &&
+ adev->gmc.gmc_funcs->override_vm_pte_flags &&
+ num_possible_nodes() > 1 && !params->pages_addr && params->allow_override)
+ amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);
+
+ params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
+ flags);
+}
+
+/**
+ * amdgpu_vm_pte_fragment - get fragment for PTEs
+ *
+ * @params: see amdgpu_vm_update_params definition
+ * @start: first PTE to handle
+ * @end: last PTE to handle
+ * @flags: hw mapping flags
+ * @frag: resulting fragment size
+ * @frag_end: end of this fragment
+ *
+ * Returns the first possible fragment for the start and end address.
+ */
+static void amdgpu_vm_pte_fragment(struct amdgpu_vm_update_params *params,
+ uint64_t start, uint64_t end, uint64_t flags,
+ unsigned int *frag, uint64_t *frag_end)
+{
+ /**
+ * The MC L1 TLB supports variable sized pages, based on a fragment
+ * field in the PTE. When this field is set to a non-zero value, page
+ * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
+ * flags are considered valid for all PTEs within the fragment range
+ * and corresponding mappings are assumed to be physically contiguous.
+ *
+ * The L1 TLB can store a single PTE for the whole fragment,
+ * significantly increasing the space available for translation
+ * caching. This leads to large improvements in throughput when the
+ * TLB is under pressure.
+ *
+ * The L2 TLB distributes small and large fragments into two
+ * asymmetric partitions. The large fragment cache is significantly
+ * larger. Thus, we try to use large fragments wherever possible.
+ * Userspace can support this by aligning virtual base address and
+ * allocation size to the fragment size.
+ *
+ * Starting with Vega10 the fragment size only controls the L1. The L2
+ * is now directly fed with small/huge/giant pages from the walker.
+ */
+ unsigned int max_frag;
+
+ if (params->adev->asic_type < CHIP_VEGA10)
+ max_frag = params->adev->vm_manager.fragment_size;
+ else
+ max_frag = 31;
+
+ /* system pages are not physically contiguous */
+ if (params->pages_addr) {
+ *frag = 0;
+ *frag_end = end;
+ return;
+ }
+
+ /* This intentionally wraps around if no bit is set */
+ *frag = min_t(unsigned int, ffs(start) - 1, fls64(end - start) - 1);
+ if (*frag >= max_frag) {
+ *frag = max_frag;
+ *frag_end = end & ~((1ULL << max_frag) - 1);
+ } else {
+ *frag_end = start + (1 << *frag);
+ }
+}
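A worked example of the fragment math above, with illustrative values:

/*
 * start = 0x400, end = 0x500 (GPU page numbers, Vega10 or later)
 * frag     = min(ffs(0x400) - 1, fls64(0x100) - 1) = min(10, 8) = 8
 * frag (8) < max_frag (31), so frag_end = 0x400 + (1 << 8) = 0x500
 * => the whole 256-page range fits in one fragment, i.e. a single
 *    1 MiB (1 << (12 + 8)) translation in the L1 TLB.
 */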
+
+/**
+ * amdgpu_vm_ptes_update - make sure that page tables are valid
+ *
+ * @params: see amdgpu_vm_update_params definition
+ * @start: start of GPU address range
+ * @end: end of GPU address range
+ * @dst: destination address to map to, the next dst inside the function
+ * @flags: mapping flags
+ *
+ * Update the page tables in the range @start - @end.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
+ */
+int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint64_t flags)
+{
+ struct amdgpu_device *adev = params->adev;
+ struct amdgpu_vm_pt_cursor cursor;
+ uint64_t frag_start = start, frag_end;
+ unsigned int frag;
+ int r;
+
+ /* figure out the initial fragment */
+ amdgpu_vm_pte_fragment(params, frag_start, end, flags, &frag,
+ &frag_end);
+
+ /* walk over the address space and update the PTs */
+ amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
+ while (cursor.pfn < end) {
+ unsigned int shift, parent_shift, mask;
+ uint64_t incr, entry_end, pe_start;
+ struct amdgpu_bo *pt;
+
+ if (!params->unlocked) {
+ /* make sure that the page tables covering the
+ * address range are actually allocated
+ */
+ r = amdgpu_vm_pt_alloc(params->adev, params->vm,
+ &cursor, params->immediate);
+ if (r)
+ return r;
+ }
+
+ shift = amdgpu_vm_pt_level_shift(adev, cursor.level);
+ parent_shift = amdgpu_vm_pt_level_shift(adev, cursor.level - 1);
+ if (params->unlocked) {
+ /* Unlocked updates are only allowed on the leaves */
+ if (amdgpu_vm_pt_descendant(adev, &cursor))
+ continue;
+ } else if (adev->asic_type < CHIP_VEGA10 &&
+ (flags & AMDGPU_PTE_VALID)) {
+ /* No huge page support before GMC v9 */
+ if (cursor.level != AMDGPU_VM_PTB) {
+ if (!amdgpu_vm_pt_descendant(adev, &cursor))
+ return -ENOENT;
+ continue;
+ }
+ } else if (frag < shift) {
+ /* We can't use this level when the fragment size is
+ * smaller than the address shift. Go to the next
+ * child entry and try again.
+ */
+ if (amdgpu_vm_pt_descendant(adev, &cursor))
+ continue;
+ } else if (frag >= parent_shift) {
+ /* If the fragment size is even larger than the parent
+ * shift we should go up one level and check it again.
+ */
+ if (!amdgpu_vm_pt_ancestor(&cursor))
+ return -EINVAL;
+ continue;
+ }
+
+ pt = cursor.entry->bo;
+ if (!pt) {
+ /* We need all PDs and PTs for mapping something, */
+ if (flags & AMDGPU_PTE_VALID)
+ return -ENOENT;
+
+ /* but unmapping something can happen at a higher
+ * level.
+ */
+ if (!amdgpu_vm_pt_ancestor(&cursor))
+ return -EINVAL;
+
+ pt = cursor.entry->bo;
+ shift = parent_shift;
+ frag_end = max(frag_end, ALIGN(frag_start + 1,
+ 1ULL << shift));
+ }
+
+ /* Looks good so far, calculate parameters for the update */
+ incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
+ mask = amdgpu_vm_pt_entries_mask(adev, cursor.level);
+ pe_start = ((cursor.pfn >> shift) & mask) * 8;
+
+ if (cursor.level < AMDGPU_VM_PTB && params->unlocked)
+ /*
+ * An unlocked unmap from the MMU notifier callback may hit a huge
+ * page whose leaf is a PDE entry; only clear that one entry and
+ * search again for the next PDE or PTE leaf.
+ */
+ entry_end = 1ULL << shift;
+ else
+ entry_end = ((uint64_t)mask + 1) << shift;
+ entry_end += cursor.pfn & ~(entry_end - 1);
+ entry_end = min(entry_end, end);
+
+ do {
+ struct amdgpu_vm *vm = params->vm;
+ uint64_t upd_end = min(entry_end, frag_end);
+ unsigned int nptes = (upd_end - frag_start) >> shift;
+ uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);
+
+ /* This can happen when we set higher level PDs to
+ * silent to stop fault floods.
+ */
+ nptes = max(nptes, 1u);
+
+ trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
+ min(nptes, 32u), dst, incr,
+ upd_flags,
+ vm->task_info ? vm->task_info->tgid : 0,
+ vm->immediate.fence_context);
+ amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
+ cursor.level, pe_start, dst,
+ nptes, incr, upd_flags);
+
+ pe_start += nptes * 8;
+ dst += nptes * incr;
+
+ frag_start = upd_end;
+ if (frag_start >= frag_end) {
+ /* figure out the next fragment */
+ amdgpu_vm_pte_fragment(params, frag_start, end,
+ flags, &frag, &frag_end);
+ if (frag < shift)
+ break;
+ }
+ } while (frag_start < entry_end);
+
+ if (amdgpu_vm_pt_descendant(adev, &cursor)) {
+ /* Free all child entries.
+ * Update the tables with the flags and addresses and free up subsequent
+ * tables in the case of huge pages or freed up areas.
+ * This is the maximum you can free, because all other page tables are not
+ * completely covered by the range and so potentially still in use.
+ */
+ while (cursor.pfn < frag_start) {
+ /* Make sure previous mapping is freed */
+ if (cursor.entry->bo) {
+ params->needs_flush = true;
+ amdgpu_vm_pt_add_list(params, &cursor);
+ }
+ amdgpu_vm_pt_next(adev, &cursor);
+ }
+
+ } else if (frag >= shift) {
+ /* or just move on to the next on the same level. */
+ amdgpu_vm_pt_next(adev, &cursor);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_vm_pt_map_tables - make the root PD and PT BOs CPU accessible
+ * @adev: amdgpu device structure
+ * @vm: amdgpu vm structure
+ *
+ * Make the root page directory and everything below it CPU accessible.
+ */
+int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+ struct amdgpu_vm_pt_cursor cursor;
+ struct amdgpu_vm_bo_base *entry;
+
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
+
+ struct amdgpu_bo_vm *bo;
+ int r;
+
+ if (entry->bo) {
+ bo = to_amdgpu_bo_vm(entry->bo);
+ r = vm->update_funcs->map_table(bo);
+ if (r)
+ return r;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
new file mode 100644
index 000000000000..36805dcfa159
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_vm.h"
+#include "amdgpu_job.h"
+#include "amdgpu_object.h"
+#include "amdgpu_trace.h"
+
+#define AMDGPU_VM_SDMA_MIN_NUM_DW 256u
+#define AMDGPU_VM_SDMA_MAX_NUM_DW (16u * 1024u)
+
+/**
+ * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
+ *
+ * @table: newly allocated or validated PD/PT
+ */
+static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
+{
+ return amdgpu_ttm_alloc_gart(&table->bo.tbo);
+}
+
+/* Allocate a new job for @count PTE updates */
+static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
+ unsigned int count, u64 k_job_id)
+{
+ enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+ : AMDGPU_IB_POOL_DELAYED;
+ struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate
+ : &p->vm->delayed;
+ unsigned int ndw;
+ int r;
+
+ /* estimate how many dw we need */
+ ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
+ if (p->pages_addr)
+ ndw += count * 2;
+ ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
+
+ r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
+ ndw * 4, pool, &p->job, k_job_id);
+ if (r)
+ return r;
+
+ p->num_dw_left = ndw;
+ return 0;
+}
+
+/**
+ * amdgpu_vm_sdma_prepare - prepare SDMA command submission
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @sync: amdgpu_sync object with fences to wait for
+ * @k_job_id: identifier of the job, for tracing purposes
+ *
+ * Returns:
+ * Negative errno on failure, 0 for success.
+ */
+static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
+ struct amdgpu_sync *sync, u64 k_job_id)
+{
+ int r;
+
+ r = amdgpu_vm_sdma_alloc_job(p, 0, k_job_id);
+ if (r)
+ return r;
+
+ if (!sync)
+ return 0;
+
+ r = amdgpu_sync_push_to_job(sync, p->job);
+ if (r) {
+ p->num_dw_left = 0;
+ amdgpu_job_free(p->job);
+ }
+ return r;
+}
+
+/**
+ * amdgpu_vm_sdma_commit - commit SDMA command submission
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @fence: resulting fence
+ *
+ * Returns:
+ * Negative errno on failure, 0 for success.
+ */
+static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
+ struct dma_fence **fence)
+{
+ struct amdgpu_ib *ib = p->job->ibs;
+ struct amdgpu_ring *ring;
+ struct dma_fence *f;
+
+ ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
+ sched);
+
+ WARN_ON(ib->length_dw == 0);
+ amdgpu_ring_pad_ib(ring, ib);
+
+ if (p->needs_flush)
+ atomic64_inc(&p->vm->tlb_seq);
+
+ WARN_ON(ib->length_dw > p->num_dw_left);
+ f = amdgpu_job_submit(p->job);
+
+ if (p->unlocked) {
+ struct dma_fence *tmp = dma_fence_get(f);
+
+ swap(p->vm->last_unlocked, tmp);
+ dma_fence_put(tmp);
+ } else {
+ dma_resv_add_fence(p->vm->root.bo->tbo.base.resv, f,
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
+
+ if (fence && !p->immediate) {
+ /*
+ * Most hw generations now have a separate queue for page table
+ * updates, but when the queue is shared with userspace we need
+ * the extra CPU round trip to correctly flush the TLB.
+ */
+ set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags);
+ swap(*fence, f);
+ }
+ dma_fence_put(f);
+ return 0;
+}
+
+/**
+ * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: addr of the page entry
+ * @count: number of page entries to copy
+ *
+ * Traces the parameters and calls the DMA function to copy the PTEs.
+ */
+static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ unsigned count)
+{
+ struct amdgpu_ib *ib = p->job->ibs;
+ uint64_t src = ib->gpu_addr;
+
+ src += p->num_dw_left * 4;
+
+ pe += amdgpu_bo_gpu_offset_no_check(bo);
+ trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);
+
+ amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
+}
+
+/**
+ * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the right asic functions
+ * to setup the page table using the DMA.
+ */
+static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint64_t flags)
+{
+ struct amdgpu_ib *ib = p->job->ibs;
+
+ pe += amdgpu_bo_gpu_offset_no_check(bo);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
+ if (count < 3) {
+ amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
+ count, incr);
+ } else {
+ amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
+ count, incr, flags);
+ }
+}
+
+/**
+ * amdgpu_vm_sdma_update - execute VM update
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @vmbo: PD/PT to update
+ * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Reserve space in the IB, set up the mapping buffer on demand and write the
+ * commands to the IB.
+ */
+static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo_vm *vmbo, uint64_t pe,
+ uint64_t addr, unsigned count, uint32_t incr,
+ uint64_t flags)
+{
+ struct amdgpu_bo *bo = &vmbo->bo;
+ struct dma_resv_iter cursor;
+ unsigned int i, ndw, nptes;
+ struct dma_fence *fence;
+ uint64_t *pte;
+ int r;
+
+ /* Wait for PD/PT moves to be completed */
+ dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ dma_fence_get(fence);
+ r = drm_sched_job_add_dependency(&p->job->base, fence);
+ if (r) {
+ dma_fence_put(fence);
+ dma_resv_iter_end(&cursor);
+ return r;
+ }
+ }
+ dma_resv_iter_end(&cursor);
+
+ do {
+ ndw = p->num_dw_left;
+ ndw -= p->job->ibs->length_dw;
+
+ if (ndw < 32) {
+ r = amdgpu_vm_sdma_commit(p, NULL);
+ if (r)
+ return r;
+
+ r = amdgpu_vm_sdma_alloc_job(p, count,
+ AMDGPU_KERNEL_JOB_ID_VM_UPDATE);
+ if (r)
+ return r;
+ }
+
+ if (!p->pages_addr) {
+ /* set page commands needed */
+ amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
+ incr, flags);
+ return 0;
+ }
+
+ /* copy commands needed */
+ ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
+
+ /* for padding */
+ ndw -= 7;
+
+ nptes = min(count, ndw / 2);
+
+ /* Put the PTEs at the end of the IB. */
+ p->num_dw_left -= nptes * 2;
+ pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
+ for (i = 0; i < nptes; ++i, addr += incr) {
+ pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
+ pte[i] |= flags;
+ }
+
+ amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
+
+ pe += nptes * 8;
+ count -= nptes;
+ } while (count);
+
+ return 0;
+}
+
+const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
+ .map_table = amdgpu_vm_sdma_map_table,
+ .prepare = amdgpu_vm_sdma_prepare,
+ .update = amdgpu_vm_sdma_update,
+ .commit = amdgpu_vm_sdma_commit
+};
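To show how these callbacks fit together, here is a simplified sketch (not code from this patch; my_update_flow() is a made-up name, and real callers such as amdgpu_vm_pt_clear() also fill in the update parameters and handle the TLB flush):

static int my_update_flow(struct amdgpu_vm_update_params *p,
			  struct amdgpu_bo_vm *vmbo, struct dma_fence **fence)
{
	int r;

	r = p->vm->update_funcs->prepare(p, NULL, AMDGPU_KERNEL_JOB_ID_VM_UPDATE);
	if (r)
		return r;

	/* one or more update() calls; here a single 8-byte entry at offset 0 */
	r = p->vm->update_funcs->update(p, vmbo, 0, 0, 1, AMDGPU_GPU_PAGE_SIZE, 0);
	if (r)
		return r;

	return p->vm->update_funcs->commit(p, fence);
}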
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
new file mode 100644
index 000000000000..5d26797356a3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/workqueue.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_gmc.h"
+
+struct amdgpu_tlb_fence {
+ struct dma_fence base;
+ struct amdgpu_device *adev;
+ struct dma_fence *dependency;
+ struct work_struct work;
+ spinlock_t lock;
+ uint16_t pasid;
+
+};
+
+static const char *amdgpu_tlb_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "amdgpu tlb fence";
+}
+
+static const char *amdgpu_tlb_fence_get_timeline_name(struct dma_fence *f)
+{
+ return "amdgpu tlb timeline";
+}
+
+static void amdgpu_tlb_fence_work(struct work_struct *work)
+{
+ struct amdgpu_tlb_fence *f = container_of(work, typeof(*f), work);
+ int r;
+
+ if (f->dependency) {
+ dma_fence_wait(f->dependency, false);
+ dma_fence_put(f->dependency);
+ f->dependency = NULL;
+ }
+
+ r = amdgpu_gmc_flush_gpu_tlb_pasid(f->adev, f->pasid, 2, true, 0);
+ if (r) {
+ dev_err(f->adev->dev, "TLB flush failed for PASID %d.\n",
+ f->pasid);
+ dma_fence_set_error(&f->base, r);
+ }
+
+ dma_fence_signal(&f->base);
+ dma_fence_put(&f->base);
+}
+
+static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
+ .get_driver_name = amdgpu_tlb_fence_get_driver_name,
+ .get_timeline_name = amdgpu_tlb_fence_get_timeline_name
+};
+
+void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct dma_fence **fence)
+{
+ struct amdgpu_tlb_fence *f;
+
+ f = kmalloc(sizeof(*f), GFP_KERNEL);
+ if (!f) {
+ /*
+ * We can't fail since the PDEs and PTEs are already updated, so
+ * just block for the dependency and execute the TLB flush
+ */
+ if (*fence)
+ dma_fence_wait(*fence, false);
+
+ amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, 2, true, 0);
+ *fence = dma_fence_get_stub();
+ return;
+ }
+
+ f->adev = adev;
+ f->dependency = *fence;
+ f->pasid = vm->pasid;
+ INIT_WORK(&f->work, amdgpu_tlb_fence_work);
+ spin_lock_init(&f->lock);
+
+ dma_fence_init64(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
+ vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
+
+ /* TODO: We probably need a separate wq here */
+ dma_fence_get(&f->base);
+ schedule_work(&f->work);
+
+ *fence = &f->base;
+}
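A usage sketch (illustrative only; my_finish_update() is a made-up name): the caller hands its last page table update fence to amdgpu_vm_tlb_fence_create() and gets back a fence that also covers the PASID TLB flush:

static void my_finish_update(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     struct dma_fence **last_update)
{
	/* the *last_update reference is handed over as the flush dependency */
	amdgpu_vm_tlb_fence_create(adev, vm, last_update);
	/* *last_update now signals only after the TLB flush has completed */
}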
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
new file mode 100644
index 000000000000..474bfe36c0c2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -0,0 +1,992 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/firmware.h>
+#include <drm/drm_drv.h>
+
+#include "amdgpu.h"
+#include "amdgpu_ucode.h"
+#include "amdgpu_vpe.h"
+#include "amdgpu_smu.h"
+#include "soc15_common.h"
+#include "vpe_v6_1.h"
+
+#define AMDGPU_CSA_VPE_SIZE 64
+/* VPE CSA resides in the 4th page of CSA */
+#define AMDGPU_CSA_VPE_OFFSET (4096 * 3)
+
+/* 1 second timeout */
+#define VPE_IDLE_TIMEOUT msecs_to_jiffies(1000)
+
+#define VPE_MAX_DPM_LEVEL 4
+#define FIXED1_8_BITS_PER_FRACTIONAL_PART 8
+#define GET_PRATIO_INTEGER_PART(x) ((x) >> FIXED1_8_BITS_PER_FRACTIONAL_PART)
+
+static void vpe_set_ring_funcs(struct amdgpu_device *adev);
+
+static inline uint16_t div16_u16_rem(uint16_t dividend, uint16_t divisor, uint16_t *remainder)
+{
+ *remainder = dividend % divisor;
+ return dividend / divisor;
+}
+
+static inline uint16_t complete_integer_division_u16(
+ uint16_t dividend,
+ uint16_t divisor,
+ uint16_t *remainder)
+{
+ return div16_u16_rem(dividend, divisor, (uint16_t *)remainder);
+}
+
+static uint16_t vpe_u1_8_from_fraction(uint16_t numerator, uint16_t denominator)
+{
+ u16 arg1_value = numerator;
+ u16 arg2_value = denominator;
+
+ uint16_t remainder;
+
+ /* determine integer part */
+ uint16_t res_value = complete_integer_division_u16(
+ arg1_value, arg2_value, &remainder);
+
+ if (res_value > 127 /* CHAR_MAX */)
+ return 0;
+
+ /* determine fractional part */
+ {
+ unsigned int i = FIXED1_8_BITS_PER_FRACTIONAL_PART;
+
+ do {
+ remainder <<= 1;
+
+ res_value <<= 1;
+
+ if (remainder >= arg2_value) {
+ res_value |= 1;
+ remainder -= arg2_value;
+ }
+ } while (--i != 0);
+ }
+
+ /* round up LSB */
+ {
+ uint16_t summand = (remainder << 1) >= arg2_value;
+
+ if ((res_value + summand) > 32767 /* SHRT_MAX */)
+ return 0;
+
+ res_value += summand;
+ }
+
+ return res_value;
+}
+
+static uint16_t vpe_internal_get_pratio(uint16_t from_frequency, uint16_t to_frequency)
+{
+ uint16_t pratio = vpe_u1_8_from_fraction(from_frequency, to_frequency);
+
+ if (GET_PRATIO_INTEGER_PART(pratio) > 1)
+ pratio = 0;
+
+ return pratio;
+}
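A worked example of the U1.8 ratio, with illustrative frequencies:

/*
 * vpe_internal_get_pratio(600, 800):
 *   integer part = 600 / 800       = 0
 *   fraction     = 0.75 * 2^8      = 192 (0xC0)
 *   => pratio = 0xC0, i.e. 0.75 in U1.8 fixed point; the integer part
 *      is <= 1, so the value is accepted (a ratio of 2.0 or more would
 *      be rejected and pratio forced to 0).
 */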
+
+/*
+ * VPE has 4 DPM levels, from level 0 (lowest) to 3 (highest).
+ * The VPE FW dynamically decides which level should be used according to the
+ * current load.
+ *
+ * Get the VPE and SOC clocks from PM, select the appropriate four clock values
+ * and calculate the ratios for adjusting from one clock to another.
+ * The VPE FW can then request the appropriate frequency from the PMFW.
+ */
+int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe)
+{
+ struct amdgpu_device *adev = vpe->ring.adev;
+ uint32_t dpm_ctl;
+
+ if (adev->pm.dpm_enabled) {
+ struct dpm_clocks clock_table = { 0 };
+ struct dpm_clock *VPEClks;
+ struct dpm_clock *SOCClks;
+ uint32_t idx;
+ uint32_t vpeclk_enalbled_num = 0;
+ uint32_t pratio_vmax_vnorm = 0, pratio_vnorm_vmid = 0, pratio_vmid_vmin = 0;
+ uint16_t pratio_vmin_freq = 0, pratio_vmid_freq = 0, pratio_vnorm_freq = 0, pratio_vmax_freq = 0;
+
+ dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable));
+ dpm_ctl |= 1; /* DPM enablement */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
+
+ /* Get VPECLK and SOCCLK */
+ if (amdgpu_dpm_get_dpm_clock_table(adev, &clock_table)) {
+ dev_dbg(adev->dev, "%s: get clock failed!\n", __func__);
+ goto disable_dpm;
+ }
+
+ SOCClks = clock_table.SocClocks;
+ VPEClks = clock_table.VPEClocks;
+
+ /* Confirm the number of enabled VPE clocks.
+ * Enabled VPE clocks are ordered from low to high in VPEClks;
+ * the highest valid clock index + 1 is the number of VPEClks.
+ */
+ for (idx = PP_SMU_NUM_VPECLK_DPM_LEVELS; idx && !vpeclk_enalbled_num; idx--)
+ if (VPEClks[idx-1].Freq)
+ vpeclk_enalbled_num = idx;
+
+ /* VPE DPM only cares about 4 levels. */
+ for (idx = 0; idx < VPE_MAX_DPM_LEVEL; idx++) {
+ uint32_t soc_dpm_level;
+ uint32_t min_freq;
+
+ if (idx == 0)
+ soc_dpm_level = 0;
+ else
+ soc_dpm_level = (idx * 2) + 1;
+
+ /* clamp the max level */
+ if (soc_dpm_level > vpeclk_enalbled_num - 1)
+ soc_dpm_level = vpeclk_enalbled_num - 1;
+
+ min_freq = (SOCClks[soc_dpm_level].Freq < VPEClks[soc_dpm_level].Freq) ?
+ SOCClks[soc_dpm_level].Freq : VPEClks[soc_dpm_level].Freq;
+
+ switch (idx) {
+ case 0:
+ pratio_vmin_freq = min_freq;
+ break;
+ case 1:
+ pratio_vmid_freq = min_freq;
+ break;
+ case 2:
+ pratio_vnorm_freq = min_freq;
+ break;
+ case 3:
+ pratio_vmax_freq = min_freq;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (pratio_vmin_freq && pratio_vmid_freq && pratio_vnorm_freq && pratio_vmax_freq) {
+ uint32_t pratio_ctl;
+
+ pratio_vmax_vnorm = (uint32_t)vpe_internal_get_pratio(pratio_vmax_freq, pratio_vnorm_freq);
+ pratio_vnorm_vmid = (uint32_t)vpe_internal_get_pratio(pratio_vnorm_freq, pratio_vmid_freq);
+ pratio_vmid_vmin = (uint32_t)vpe_internal_get_pratio(pratio_vmid_freq, pratio_vmin_freq);
+
+ pratio_ctl = pratio_vmax_vnorm | (pratio_vnorm_vmid << 9) | (pratio_vmid_vmin << 18);
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_pratio), pratio_ctl); /* PRatio */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_interval), 24000); /* 1ms, unit=1/24MHz */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_decision_threshold), 1200000); /* 50ms */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_busy_clamp_threshold), 1200000);/* 50ms */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_idle_clamp_threshold), 1200000);/* 50ms */
+ dev_dbg(adev->dev, "%s: configure vpe dpm pratio done!\n", __func__);
+ } else {
+ dev_dbg(adev->dev, "%s: invalid pratio parameters!\n", __func__);
+ goto disable_dpm;
+ }
+ }
+ return 0;
+
+disable_dpm:
+ dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable));
+ dpm_ctl &= 0xfffffffe; /* Disable DPM */
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
+ dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__);
+ return -EINVAL;
+}
+
+int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev)
+{
+ struct amdgpu_firmware_info ucode = {
+ .ucode_id = AMDGPU_UCODE_ID_VPE,
+ .mc_addr = adev->vpe.cmdbuf_gpu_addr,
+ .ucode_size = 8,
+ };
+
+ return psp_execute_ip_fw_load(&adev->psp, &ucode);
+}
+
+int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe)
+{
+ struct amdgpu_device *adev = vpe->ring.adev;
+ const struct vpe_firmware_header_v1_0 *vpe_hdr;
+ char fw_prefix[32];
+ int ret;
+
+ amdgpu_ucode_ip_version_decode(adev, VPE_HWIP, fw_prefix, sizeof(fw_prefix));
+ ret = amdgpu_ucode_request(adev, &adev->vpe.fw, AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s.bin", fw_prefix);
+ if (ret)
+ goto out;
+
+ vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
+ adev->vpe.fw_version = le32_to_cpu(vpe_hdr->header.ucode_version);
+ adev->vpe.feature_version = le32_to_cpu(vpe_hdr->ucode_feature_version);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ struct amdgpu_firmware_info *info;
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTX];
+ info->ucode_id = AMDGPU_UCODE_ID_VPE_CTX;
+ info->fw = adev->vpe.fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes), PAGE_SIZE);
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_VPE_CTL];
+ info->ucode_id = AMDGPU_UCODE_ID_VPE_CTL;
+ info->fw = adev->vpe.fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes), PAGE_SIZE);
+ }
+
+ return 0;
+out:
+ dev_err(adev->dev, "fail to initialize vpe microcode\n");
+ release_firmware(adev->vpe.fw);
+ adev->vpe.fw = NULL;
+ return ret;
+}
+
+int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe)
+{
+ struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
+ struct amdgpu_ring *ring = &vpe->ring;
+ int ret;
+
+ ring->ring_obj = NULL;
+ ring->use_doorbell = true;
+ ring->vm_hub = AMDGPU_MMHUB0(0);
+ ring->doorbell_index = (adev->doorbell_index.vpe_ring << 1);
+ snprintf(ring->name, 4, "vpe");
+
+ ret = amdgpu_ring_init(adev, ring, 1024, &vpe->trap_irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe)
+{
+ amdgpu_ring_fini(&vpe->ring);
+
+ return 0;
+}
+
+static int vpe_early_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ switch (amdgpu_ip_version(adev, VPE_HWIP, 0)) {
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 1, 3):
+ vpe_v6_1_set_funcs(vpe);
+ break;
+ case IP_VERSION(6, 1, 1):
+ vpe_v6_1_set_funcs(vpe);
+ vpe->collaborate_mode = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ vpe_set_ring_funcs(adev);
+ vpe_set_regs(vpe);
+
+ dev_info(adev->dev, "VPE: collaborate mode %s", vpe->collaborate_mode ? "true" : "false");
+
+ return 0;
+}
+
+static void vpe_idle_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device, vpe.idle_work.work);
+ unsigned int fences = 0;
+
+ fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
+
+ if (fences == 0)
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+ else
+ schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+}
+
+static int vpe_common_init(struct amdgpu_vpe *vpe)
+{
+ struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
+ int r;
+
+ r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vpe.cmdbuf_obj,
+ &adev->vpe.cmdbuf_gpu_addr,
+ (void **)&adev->vpe.cmdbuf_cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "VPE: failed to allocate cmdbuf bo %d\n", r);
+ return r;
+ }
+
+ vpe->context_started = false;
+ INIT_DELAYED_WORK(&adev->vpe.idle_work, vpe_idle_work_handler);
+
+ return 0;
+}
+
+static int vpe_sw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_vpe *vpe = &adev->vpe;
+ int ret;
+
+ ret = vpe_common_init(vpe);
+ if (ret)
+ goto out;
+
+ ret = vpe_irq_init(vpe);
+ if (ret)
+ goto out;
+
+ ret = vpe_ring_init(vpe);
+ if (ret)
+ goto out;
+
+ ret = vpe_init_microcode(vpe);
+ if (ret)
+ goto out;
+
+ adev->vpe.supported_reset =
+ amdgpu_get_soft_full_reset_mask(&adev->vpe.ring);
+ if (!amdgpu_sriov_vf(adev))
+ adev->vpe.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
+ ret = amdgpu_vpe_sysfs_reset_mask_init(adev);
+ if (ret)
+ goto out;
+out:
+ return ret;
+}
+
+static int vpe_sw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ release_firmware(vpe->fw);
+ vpe->fw = NULL;
+
+ amdgpu_vpe_sysfs_reset_mask_fini(adev);
+ vpe_ring_fini(vpe);
+
+ amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj,
+ &adev->vpe.cmdbuf_gpu_addr,
+ (void **)&adev->vpe.cmdbuf_cpu_addr);
+
+ return 0;
+}
+
+static int vpe_hw_init(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_vpe *vpe = &adev->vpe;
+ int ret;
+
+ /* Power on VPE */
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_UNGATE);
+ if (ret)
+ return ret;
+
+ ret = vpe_load_microcode(vpe);
+ if (ret)
+ return ret;
+
+ ret = vpe_ring_start(vpe);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int vpe_hw_fini(struct amdgpu_ip_block *ip_block)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ cancel_delayed_work_sync(&adev->vpe.idle_work);
+
+ vpe_ring_stop(vpe);
+
+ /* Power off VPE */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_GATE);
+
+ return 0;
+}
+
+static int vpe_suspend(struct amdgpu_ip_block *ip_block)
+{
+ return vpe_hw_fini(ip_block);
+}
+
+static int vpe_resume(struct amdgpu_ip_block *ip_block)
+{
+ return vpe_hw_init(ip_block);
+}
+
+static void vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (i == 0)
+ amdgpu_ring_write(ring, ring->funcs->nop |
+ VPE_CMD_NOP_HEADER_COUNT(count - 1));
+ else
+ amdgpu_ring_write(ring, ring->funcs->nop);
+}
+
+static uint64_t vpe_get_csa_mc_addr(struct amdgpu_ring *ring, uint32_t vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t index = 0;
+ uint64_t csa_mc_addr;
+
+ if (amdgpu_sriov_vf(adev) || vmid == 0 || !adev->gfx.mcbp)
+ return 0;
+
+ csa_mc_addr = amdgpu_csa_vaddr(adev) + AMDGPU_CSA_VPE_OFFSET +
+ index * AMDGPU_CSA_VPE_SIZE;
+
+ return csa_mc_addr;
+}
+
+static void vpe_ring_emit_pred_exec(struct amdgpu_ring *ring,
+ uint32_t device_select,
+ uint32_t exec_count)
+{
+ if (!ring->adev->vpe.collaborate_mode)
+ return;
+
+ amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_PRED_EXE, 0) |
+ (device_select << 16));
+ amdgpu_ring_write(ring, exec_count & 0x1fff);
+}
+
+static void vpe_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
+ struct amdgpu_ib *ib,
+ uint32_t flags)
+{
+ uint32_t vmid = AMDGPU_JOB_GET_VMID(job);
+ uint64_t csa_mc_addr = vpe_get_csa_mc_addr(ring, vmid);
+
+ amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_INDIRECT, 0) |
+ VPE_CMD_INDIRECT_HEADER_VMID(vmid & 0xf));
+
+ /* base must be 32 byte aligned */
+ amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0);
+ amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
+ amdgpu_ring_write(ring, ib->length_dw);
+ amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
+ amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
+}
+
+static void vpe_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr,
+ uint64_t seq, unsigned int flags)
+{
+ int i = 0;
+
+ do {
+ /* write the fence */
+ amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0));
+ /* zero in first two bits */
+ WARN_ON_ONCE(addr & 0x3);
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, i == 0 ? lower_32_bits(seq) : upper_32_bits(seq));
+ addr += 4;
+ } while ((flags & AMDGPU_FENCE_FLAG_64BIT) && (i++ < 1));
+
+ if (flags & AMDGPU_FENCE_FLAG_INT) {
+ /* generate an interrupt */
+ amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_TRAP, 0));
+ amdgpu_ring_write(ring, 0);
+ }
+
+}
+
+static void vpe_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
+{
+ uint32_t seq = ring->fence_drv.sync_seq;
+ uint64_t addr = ring->fence_drv.gpu_addr;
+
+ vpe_ring_emit_pred_exec(ring, 0, 6);
+
+ /* wait for idle */
+ amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM,
+ VPE_POLL_REGMEM_SUBOP_REGMEM) |
+ VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
+ VPE_CMD_POLL_REGMEM_HEADER_MEM(1));
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, seq); /* reference */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
+ amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ VPE_CMD_POLL_REGMEM_DW5_INTERVAL(4));
+}
+
+static void vpe_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val)
+{
+ vpe_ring_emit_pred_exec(ring, 0, 3);
+
+ amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_REG_WRITE, 0));
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring, val);
+}
+
+static void vpe_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t val, uint32_t mask)
+{
+ vpe_ring_emit_pred_exec(ring, 0, 6);
+
+ amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_POLL_REGMEM,
+ VPE_POLL_REGMEM_SUBOP_REGMEM) |
+ VPE_CMD_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
+ VPE_CMD_POLL_REGMEM_HEADER_MEM(0));
+ amdgpu_ring_write(ring, reg << 2);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, val); /* reference */
+ amdgpu_ring_write(ring, mask); /* mask */
+ amdgpu_ring_write(ring, VPE_CMD_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
+ VPE_CMD_POLL_REGMEM_DW5_INTERVAL(10));
+}
+
+static void vpe_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid,
+ uint64_t pd_addr)
+{
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
+}
+
+static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring,
+ uint64_t addr)
+{
+ unsigned int ret;
+
+ amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0));
+ amdgpu_ring_write(ring, lower_32_bits(addr));
+ amdgpu_ring_write(ring, upper_32_bits(addr));
+ amdgpu_ring_write(ring, 1);
+ ret = ring->wptr & ring->buf_mask;
+ amdgpu_ring_write(ring, 0);
+
+ return ret;
+}
+
+static int vpe_ring_preempt_ib(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vpe *vpe = &adev->vpe;
+ uint32_t preempt_reg = vpe->regs.queue0_preempt;
+ int i, r = 0;
+
+ /* assert preemption condition */
+ amdgpu_ring_set_preempt_cond_exec(ring, false);
+
+ /* emit the trailing fence */
+ ring->trail_seq += 1;
+ amdgpu_ring_alloc(ring, 10);
+ vpe_ring_emit_fence(ring, ring->trail_fence_gpu_addr, ring->trail_seq, 0);
+ amdgpu_ring_commit(ring);
+
+ /* assert IB preemption */
+ WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 1);
+
+ /* poll the trailing fence */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (ring->trail_seq ==
+ le32_to_cpu(*(ring->trail_fence_cpu_addr)))
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout) {
+ r = -EINVAL;
+ dev_err(adev->dev, "ring %d failed to be preempted\n", ring->idx);
+ }
+
+ /* deassert IB preemption */
+ WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 0);
+
+ /* deassert the preemption condition */
+ amdgpu_ring_set_preempt_cond_exec(ring, true);
+
+ return r;
+}
+
+static int vpe_set_clockgating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int vpe_set_powergating_state(struct amdgpu_ip_block *ip_block,
+ enum amd_powergating_state state)
+{
+ struct amdgpu_device *adev = ip_block->adev;
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ if (!adev->pm.dpm_enabled)
+ dev_err(adev->dev, "Without PM, cannot support powergating\n");
+
+ dev_dbg(adev->dev, "%s: %s!\n", __func__, (state == AMD_PG_STATE_GATE) ? "GATE":"UNGATE");
+
+ if (state == AMD_PG_STATE_GATE) {
+ amdgpu_dpm_enable_vpe(adev, false);
+ vpe->context_started = false;
+ } else {
+ amdgpu_dpm_enable_vpe(adev, true);
+ }
+
+ return 0;
+}
+
+static uint64_t vpe_ring_get_rptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vpe *vpe = &adev->vpe;
+ uint64_t rptr;
+
+ if (ring->use_doorbell) {
+ rptr = atomic64_read((atomic64_t *)ring->rptr_cpu_addr);
+ dev_dbg(adev->dev, "rptr/doorbell before shift == 0x%016llx\n", rptr);
+ } else {
+ rptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_hi));
+ rptr = rptr << 32;
+ rptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_lo));
+ dev_dbg(adev->dev, "rptr before shift [%i] == 0x%016llx\n", ring->me, rptr);
+ }
+
+ return (rptr >> 2);
+}
+
+static uint64_t vpe_ring_get_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vpe *vpe = &adev->vpe;
+ uint64_t wptr;
+
+ if (ring->use_doorbell) {
+ wptr = atomic64_read((atomic64_t *)ring->wptr_cpu_addr);
+ dev_dbg(adev->dev, "wptr/doorbell before shift == 0x%016llx\n", wptr);
+ } else {
+ wptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi));
+ wptr = wptr << 32;
+ wptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo));
+ dev_dbg(adev->dev, "wptr before shift [%i] == 0x%016llx\n", ring->me, wptr);
+ }
+
+ return (wptr >> 2);
+}
+
+static void vpe_ring_set_wptr(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ if (ring->use_doorbell) {
+ dev_dbg(adev->dev, "Using doorbell, \
+ wptr_offs == 0x%08x, \
+ lower_32_bits(ring->wptr) << 2 == 0x%08x, \
+ upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
+ ring->wptr_offs,
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ atomic64_set((atomic64_t *)ring->wptr_cpu_addr, ring->wptr << 2);
+ WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
+ if (vpe->collaborate_mode)
+ WDOORBELL64(ring->doorbell_index + 4, ring->wptr << 2);
+ } else {
+ int i;
+
+ for (i = 0; i < vpe->num_instances; i++) {
+ dev_dbg(adev->dev, "Not using doorbell, \
+ regVPEC_QUEUE0_RB_WPTR == 0x%08x, \
+ regVPEC_QUEUE0_RB_WPTR_HI == 0x%08x\n",
+ lower_32_bits(ring->wptr << 2),
+ upper_32_bits(ring->wptr << 2));
+ WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_lo),
+ lower_32_bits(ring->wptr << 2));
+ WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_hi),
+ upper_32_bits(ring->wptr << 2));
+ }
+ }
+}
+
+static int vpe_ring_test_ring(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ const uint32_t test_pattern = 0xdeadbeef;
+ uint32_t index, i;
+ uint64_t wb_addr;
+ int ret;
+
+ ret = amdgpu_device_wb_get(adev, &index);
+ if (ret) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret);
+ return ret;
+ }
+
+ adev->wb.wb[index] = 0;
+ wb_addr = adev->wb.gpu_addr + (index * 4);
+
+ ret = amdgpu_ring_alloc(ring, 4);
+ if (ret) {
+ dev_err(adev->dev, "amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, ret);
+ goto out;
+ }
+
+ amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0));
+ amdgpu_ring_write(ring, lower_32_bits(wb_addr));
+ amdgpu_ring_write(ring, upper_32_bits(wb_addr));
+ amdgpu_ring_write(ring, test_pattern);
+ amdgpu_ring_commit(ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (le32_to_cpu(adev->wb.wb[index]) == test_pattern)
+ goto out;
+ udelay(1);
+ }
+
+ ret = -ETIMEDOUT;
+out:
+ amdgpu_device_wb_free(adev, index);
+
+ return ret;
+}
+
+static int vpe_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+{
+ struct amdgpu_device *adev = ring->adev;
+ const uint32_t test_pattern = 0xdeadbeef;
+ struct amdgpu_ib ib = {};
+ struct dma_fence *f = NULL;
+ uint32_t index;
+ uint64_t wb_addr;
+ int ret;
+
+ ret = amdgpu_device_wb_get(adev, &index);
+ if (ret) {
+ dev_err(adev->dev, "(%d) failed to allocate wb slot\n", ret);
+ return ret;
+ }
+
+ adev->wb.wb[index] = 0;
+ wb_addr = adev->wb.gpu_addr + (index * 4);
+
+ ret = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
+ if (ret)
+ goto err0;
+
+ ib.ptr[0] = VPE_CMD_HEADER(VPE_CMD_OPCODE_FENCE, 0);
+ ib.ptr[1] = lower_32_bits(wb_addr);
+ ib.ptr[2] = upper_32_bits(wb_addr);
+ ib.ptr[3] = test_pattern;
+ ib.ptr[4] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
+ ib.ptr[5] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
+ ib.ptr[6] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
+ ib.ptr[7] = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0);
+ ib.length_dw = 8;
+
+ ret = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+ if (ret)
+ goto err1;
+
+ ret = dma_fence_wait_timeout(f, false, timeout);
+ if (ret <= 0) {
+ ret = ret ? : -ETIMEDOUT;
+ goto err1;
+ }
+
+ ret = (le32_to_cpu(adev->wb.wb[index]) == test_pattern) ? 0 : -EINVAL;
+
+err1:
+ amdgpu_ib_free(&ib, NULL);
+ dma_fence_put(f);
+err0:
+ amdgpu_device_wb_free(adev, index);
+
+ return ret;
+}
+
+static void vpe_ring_begin_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_vpe *vpe = &adev->vpe;
+
+ cancel_delayed_work_sync(&adev->vpe.idle_work);
+
+ /* Power on VPE and notify VPE of new context */
+ if (!vpe->context_started) {
+ uint32_t context_notify;
+
+ /* Power on VPE */
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE, AMD_PG_STATE_UNGATE);
+
+ /* Indicates that a job from a new context has been submitted. */
+ context_notify = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator));
+ if ((context_notify & 0x1) == 0)
+ context_notify |= 0x1;
+ else
+ context_notify &= ~(0x1);
+ WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator), context_notify);
+ vpe->context_started = true;
+ }
+}
+
+static void vpe_ring_end_use(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
+}
+
+static int vpe_ring_reset(struct amdgpu_ring *ring,
+ unsigned int vmid,
+ struct amdgpu_fence *timedout_fence)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ amdgpu_ring_reset_helper_begin(ring, timedout_fence);
+
+ r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_GATE);
+ if (r)
+ return r;
+ r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VPE,
+ AMD_PG_STATE_UNGATE);
+ if (r)
+ return r;
+
+ return amdgpu_ring_reset_helper_end(ring, timedout_fence);
+}
+
+static ssize_t amdgpu_get_vpe_reset_mask(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (!adev)
+ return -ENODEV;
+
+ return amdgpu_show_reset_mask(buf, adev->vpe.supported_reset);
+}
+
+static DEVICE_ATTR(vpe_reset_mask, 0444,
+ amdgpu_get_vpe_reset_mask, NULL);
+
+int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->vpe.num_instances) {
+ r = device_create_file(adev->dev, &dev_attr_vpe_reset_mask);
+ if (r)
+ return r;
+ }
+
+ return r;
+}
+
+void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev)
+{
+ if (adev->dev->kobj.sd) {
+ if (adev->vpe.num_instances)
+ device_remove_file(adev->dev, &dev_attr_vpe_reset_mask);
+ }
+}
+
+static const struct amdgpu_ring_funcs vpe_ring_funcs = {
+ .type = AMDGPU_RING_TYPE_VPE,
+ .align_mask = 0xf,
+ .nop = VPE_CMD_HEADER(VPE_CMD_OPCODE_NOP, 0),
+ .support_64bit_ptrs = true,
+ .get_rptr = vpe_ring_get_rptr,
+ .get_wptr = vpe_ring_get_wptr,
+ .set_wptr = vpe_ring_set_wptr,
+ .emit_frame_size =
+ 5 + /* vpe_ring_init_cond_exec */
+ 6 + /* vpe_ring_emit_pipeline_sync */
+ 10 + 10 + 10 + /* vpe_ring_emit_fence */
+ /* vpe_ring_emit_vm_flush */
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6,
+ .emit_ib_size = 7 + 6,
+ .emit_ib = vpe_ring_emit_ib,
+ .emit_pipeline_sync = vpe_ring_emit_pipeline_sync,
+ .emit_fence = vpe_ring_emit_fence,
+ .emit_vm_flush = vpe_ring_emit_vm_flush,
+ .emit_wreg = vpe_ring_emit_wreg,
+ .emit_reg_wait = vpe_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+ .insert_nop = vpe_ring_insert_nop,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .test_ring = vpe_ring_test_ring,
+ .test_ib = vpe_ring_test_ib,
+ .init_cond_exec = vpe_ring_init_cond_exec,
+ .preempt_ib = vpe_ring_preempt_ib,
+ .begin_use = vpe_ring_begin_use,
+ .end_use = vpe_ring_end_use,
+ .reset = vpe_ring_reset,
+};
+
+static void vpe_set_ring_funcs(struct amdgpu_device *adev)
+{
+ adev->vpe.ring.funcs = &vpe_ring_funcs;
+}
+
+const struct amd_ip_funcs vpe_ip_funcs = {
+ .name = "vpe_v6_1",
+ .early_init = vpe_early_init,
+ .sw_init = vpe_sw_init,
+ .sw_fini = vpe_sw_fini,
+ .hw_init = vpe_hw_init,
+ .hw_fini = vpe_hw_fini,
+ .suspend = vpe_suspend,
+ .resume = vpe_resume,
+ .set_clockgating_state = vpe_set_clockgating_state,
+ .set_powergating_state = vpe_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version vpe_v6_1_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_VPE,
+ .major = 6,
+ .minor = 1,
+ .rev = 0,
+ .funcs = &vpe_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h
new file mode 100644
index 000000000000..695da740a97e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __AMDGPU_VPE_H__
+#define __AMDGPU_VPE_H__
+
+#include "amdgpu_ring.h"
+#include "amdgpu_irq.h"
+#include "vpe_6_1_fw_if.h"
+
+#define AMDGPU_MAX_VPE_INSTANCES 2
+
+struct amdgpu_vpe;
+
+struct vpe_funcs {
+ uint32_t (*get_reg_offset)(struct amdgpu_vpe *vpe, uint32_t inst, uint32_t offset);
+ int (*set_regs)(struct amdgpu_vpe *vpe);
+ int (*irq_init)(struct amdgpu_vpe *vpe);
+ int (*init_microcode)(struct amdgpu_vpe *vpe);
+ int (*load_microcode)(struct amdgpu_vpe *vpe);
+ int (*ring_init)(struct amdgpu_vpe *vpe);
+ int (*ring_start)(struct amdgpu_vpe *vpe);
+ int (*ring_stop)(struct amdgpu_vpe *vpe);
+ int (*ring_fini)(struct amdgpu_vpe *vpe);
+};
+
+struct vpe_regs {
+ uint32_t queue0_rb_rptr_lo;
+ uint32_t queue0_rb_rptr_hi;
+ uint32_t queue0_rb_wptr_lo;
+ uint32_t queue0_rb_wptr_hi;
+ uint32_t queue0_preempt;
+
+ uint32_t dpm_enable;
+ uint32_t dpm_pratio;
+ uint32_t dpm_request_interval;
+ uint32_t dpm_decision_threshold;
+ uint32_t dpm_busy_clamp_threshold;
+ uint32_t dpm_idle_clamp_threshold;
+ uint32_t dpm_request_lv;
+ uint32_t context_indicator;
+};
+
+struct amdgpu_vpe {
+ struct amdgpu_ring ring;
+ struct amdgpu_irq_src trap_irq;
+
+ const struct vpe_funcs *funcs;
+ struct vpe_regs regs;
+
+ const struct firmware *fw;
+ uint32_t fw_version;
+ uint32_t feature_version;
+
+ struct amdgpu_bo *cmdbuf_obj;
+ uint64_t cmdbuf_gpu_addr;
+ uint32_t *cmdbuf_cpu_addr;
+ struct delayed_work idle_work;
+ bool context_started;
+
+ uint32_t num_instances;
+ bool collaborate_mode;
+ uint32_t supported_reset;
+};
+
+int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev);
+int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe);
+int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe);
+int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe);
+int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe);
+void amdgpu_vpe_sysfs_reset_mask_fini(struct amdgpu_device *adev);
+int amdgpu_vpe_sysfs_reset_mask_init(struct amdgpu_device *adev);
+
+#define vpe_ring_init(vpe) ((vpe)->funcs->ring_init ? (vpe)->funcs->ring_init((vpe)) : 0)
+#define vpe_ring_start(vpe) ((vpe)->funcs->ring_start ? (vpe)->funcs->ring_start((vpe)) : 0)
+#define vpe_ring_stop(vpe) ((vpe)->funcs->ring_stop ? (vpe)->funcs->ring_stop((vpe)) : 0)
+#define vpe_ring_fini(vpe) ((vpe)->funcs->ring_fini ? (vpe)->funcs->ring_fini((vpe)) : 0)
+
+#define vpe_get_reg_offset(vpe, inst, offset) \
+ ((vpe)->funcs->get_reg_offset ? (vpe)->funcs->get_reg_offset((vpe), (inst), (offset)) : 0)
+#define vpe_set_regs(vpe) \
+ ((vpe)->funcs->set_regs ? (vpe)->funcs->set_regs((vpe)) : 0)
+#define vpe_irq_init(vpe) \
+ ((vpe)->funcs->irq_init ? (vpe)->funcs->irq_init((vpe)) : 0)
+#define vpe_init_microcode(vpe) \
+ ((vpe)->funcs->init_microcode ? (vpe)->funcs->init_microcode((vpe)) : 0)
+#define vpe_load_microcode(vpe) \
+ ((vpe)->funcs->load_microcode ? (vpe)->funcs->load_microcode((vpe)) : 0)
+
+extern const struct amdgpu_ip_block_version vpe_v6_1_ip_block;
+
+#endif
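
The vpe_ring_*()/vpe_*() macros above dispatch through the optional struct vpe_funcs hooks and fall back to 0 when a backend leaves a hook unimplemented. Below is a minimal sketch, not part of the patch, of how an ASIC-specific backend might wire up the table; the example_* names are hypothetical, while the amdgpu_vpe_* helpers are the ones declared in this header.

    /*
     * Illustrative backend wiring only. Hooks left NULL are turned into
     * no-ops by the NULL-safe macros above.
     */
    static uint32_t example_get_reg_offset(struct amdgpu_vpe *vpe,
                                           uint32_t inst, uint32_t offset)
    {
            /* a real backend would add the per-instance register base here */
            return offset;
    }

    static const struct vpe_funcs example_vpe_funcs = {
            .get_reg_offset = example_get_reg_offset,
            .init_microcode = amdgpu_vpe_init_microcode,
            .ring_init      = amdgpu_vpe_ring_init,
            .ring_fini      = amdgpu_vpe_ring_fini,
            /* .load_microcode, .ring_start, .ring_stop left NULL on purpose */
    };

    static void example_set_funcs(struct amdgpu_vpe *vpe)
    {
            vpe->funcs = &example_vpe_funcs;
    }
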
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index a4831fe0223b..a5adb2ed9b3c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -22,60 +22,407 @@
* Authors: Christian König
*/
-#include <drm/drmP.h>
+#include <linux/dma-mapping.h>
+#include <drm/ttm/ttm_range_manager.h>
+#include <drm/drm_drv.h>
+
#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_res_cursor.h"
+#include "atom.h"
+
+#define AMDGPU_MAX_SG_SEGMENT_SIZE (2UL << 30)
-struct amdgpu_vram_mgr {
- struct drm_mm mm;
- spinlock_t lock;
+struct amdgpu_vram_reservation {
+ u64 start;
+ u64 size;
+ struct list_head allocated;
+ struct list_head blocks;
};
+static inline struct amdgpu_vram_mgr *
+to_vram_mgr(struct ttm_resource_manager *man)
+{
+ return container_of(man, struct amdgpu_vram_mgr, manager);
+}
+
+static inline struct amdgpu_device *
+to_amdgpu_device(struct amdgpu_vram_mgr *mgr)
+{
+ return container_of(mgr, struct amdgpu_device, mman.vram_mgr);
+}
+
+static inline struct drm_buddy_block *
+amdgpu_vram_mgr_first_block(struct list_head *list)
+{
+ return list_first_entry_or_null(list, struct drm_buddy_block, link);
+}
+
+static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
+{
+ struct drm_buddy_block *block;
+ u64 start, size;
+
+ block = amdgpu_vram_mgr_first_block(head);
+ if (!block)
+ return false;
+
+ while (head != block->link.next) {
+ start = amdgpu_vram_mgr_block_start(block);
+ size = amdgpu_vram_mgr_block_size(block);
+
+ block = list_entry(block->link.next, struct drm_buddy_block, link);
+ if (start + size != amdgpu_vram_mgr_block_start(block))
+ return false;
+ }
+
+ return true;
+}
+
+static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head)
+{
+ struct drm_buddy_block *block;
+ u64 size = 0;
+
+ list_for_each_entry(block, head, link)
+ size += amdgpu_vram_mgr_block_size(block);
+
+ return size;
+}
+
/**
- * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
+ * DOC: mem_info_vram_total
*
- * @man: TTM memory type manager
- * @p_size: maximum size of VRAM
+ * The amdgpu driver provides a sysfs API for reporting current total VRAM
+ * available on the device
+ * The file mem_info_vram_total is used for this and returns the total
+ * amount of VRAM in bytes
+ */
+static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ return sysfs_emit(buf, "%llu\n", adev->gmc.real_vram_size);
+}
+
+/**
+ * DOC: mem_info_vis_vram_total
*
- * Allocate and initialize the VRAM manager.
+ * The amdgpu driver provides a sysfs API for reporting current total
+ * visible VRAM available on the device
+ * The file mem_info_vis_vram_total is used for this and returns the total
+ * amount of visible VRAM in bytes
+ */
+static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ return sysfs_emit(buf, "%llu\n", adev->gmc.visible_vram_size);
+}
+
+/**
+ * DOC: mem_info_vram_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
+ * currently in use on the device.
+ * The file mem_info_vram_used is used for this and returns the total
+ * amount of currently used VRAM in bytes
+ */
+static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct ttm_resource_manager *man = &adev->mman.vram_mgr.manager;
+
+ return sysfs_emit(buf, "%llu\n", ttm_resource_manager_usage(man));
+}
+
+/**
+ * DOC: mem_info_vis_vram_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting current total of
+ * used visible VRAM
+ * The file mem_info_vis_vram_used is used for this and returns the total
+ * amount of currently used visible VRAM in bytes
+ */
+static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ return sysfs_emit(buf, "%llu\n",
+ amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr));
+}
+
+/**
+ * DOC: mem_info_vram_vendor
+ *
+ * The amdgpu driver provides a sysfs API for reporting the vendor of the
+ * installed VRAM
+ * The file mem_info_vram_vendor is used for this and returns the name of the
+ * vendor.
+ */
+static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ switch (adev->gmc.vram_vendor) {
+ case SAMSUNG:
+ return sysfs_emit(buf, "samsung\n");
+ case INFINEON:
+ return sysfs_emit(buf, "infineon\n");
+ case ELPIDA:
+ return sysfs_emit(buf, "elpida\n");
+ case ETRON:
+ return sysfs_emit(buf, "etron\n");
+ case NANYA:
+ return sysfs_emit(buf, "nanya\n");
+ case HYNIX:
+ return sysfs_emit(buf, "hynix\n");
+ case MOSEL:
+ return sysfs_emit(buf, "mosel\n");
+ case WINBOND:
+ return sysfs_emit(buf, "winbond\n");
+ case ESMT:
+ return sysfs_emit(buf, "esmt\n");
+ case MICRON:
+ return sysfs_emit(buf, "micron\n");
+ default:
+ return sysfs_emit(buf, "unknown\n");
+ }
+}
+
+static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
+ amdgpu_mem_info_vram_total_show, NULL);
+static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
+ amdgpu_mem_info_vis_vram_total_show,NULL);
+static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
+ amdgpu_mem_info_vram_used_show, NULL);
+static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
+ amdgpu_mem_info_vis_vram_used_show, NULL);
+static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
+ amdgpu_mem_info_vram_vendor, NULL);
+
+static struct attribute *amdgpu_vram_mgr_attributes[] = {
+ &dev_attr_mem_info_vram_total.attr,
+ &dev_attr_mem_info_vis_vram_total.attr,
+ &dev_attr_mem_info_vram_used.attr,
+ &dev_attr_mem_info_vis_vram_used.attr,
+ &dev_attr_mem_info_vram_vendor.attr,
+ NULL
+};
+
+static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int i)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ if (attr == &dev_attr_mem_info_vram_vendor.attr &&
+ !adev->gmc.vram_vendor)
+ return 0;
+
+ return attr->mode;
+}
+
+const struct attribute_group amdgpu_vram_mgr_attr_group = {
+ .attrs = amdgpu_vram_mgr_attributes,
+ .is_visible = amdgpu_vram_attrs_is_visible
+};
+
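
The DOC: blocks above describe read-only sysfs files exposed in the device's sysfs directory. A small userspace sketch reading one of them is shown below; the card0 path is an assumption for illustration, the actual node depends on the system.

    #include <stdio.h>

    int main(void)
    {
            /* path assumes card0; adjust for the device being inspected */
            FILE *f = fopen("/sys/class/drm/card0/device/mem_info_vram_used", "r");
            unsigned long long used;

            if (!f || fscanf(f, "%llu", &used) != 1) {
                    perror("mem_info_vram_used");
                    return 1;
            }
            printf("VRAM used: %llu bytes\n", used);
            fclose(f);
            return 0;
    }
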
+/**
+ * amdgpu_vram_mgr_vis_size - Calculate visible block size
+ *
+ * @adev: amdgpu_device pointer
+ * @block: DRM BUDDY block structure
+ *
+ * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM
+ */
+static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
+ struct drm_buddy_block *block)
+{
+ u64 start = amdgpu_vram_mgr_block_start(block);
+ u64 end = start + amdgpu_vram_mgr_block_size(block);
+
+ if (start >= adev->gmc.visible_vram_size)
+ return 0;
+
+ return (end > adev->gmc.visible_vram_size ?
+ adev->gmc.visible_vram_size : end) - start;
+}
+
+/**
+ * amdgpu_vram_mgr_bo_visible_size - CPU visible BO size
+ *
+ * @bo: &amdgpu_bo buffer object (must be in VRAM)
+ *
+ * Returns:
+ * How much of the given &amdgpu_bo buffer object lies in CPU visible VRAM.
+ */
+u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_resource *res = bo->tbo.resource;
+ struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
+ struct drm_buddy_block *block;
+ u64 usage = 0;
+
+ if (amdgpu_gmc_vram_full_visible(&adev->gmc))
+ return amdgpu_bo_size(bo);
+
+ if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ return 0;
+
+ list_for_each_entry(block, &vres->blocks, link)
+ usage += amdgpu_vram_mgr_vis_size(adev, block);
+
+ return usage;
+}
+
+/* Commit the reservation of VRAM pages */
+static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
+{
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ struct amdgpu_device *adev = to_amdgpu_device(mgr);
+ struct drm_buddy *mm = &mgr->mm;
+ struct amdgpu_vram_reservation *rsv, *temp;
+ struct drm_buddy_block *block;
+ uint64_t vis_usage;
+
+ list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
+ if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
+ rsv->size, mm->chunk_size, &rsv->allocated,
+ DRM_BUDDY_RANGE_ALLOCATION))
+ continue;
+
+ block = amdgpu_vram_mgr_first_block(&rsv->allocated);
+ if (!block)
+ continue;
+
+ dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
+ rsv->start, rsv->size);
+
+ vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
+ atomic64_add(vis_usage, &mgr->vis_usage);
+ spin_lock(&man->bdev->lru_lock);
+ man->usage += rsv->size;
+ spin_unlock(&man->bdev->lru_lock);
+ list_move(&rsv->blocks, &mgr->reserved_pages);
+ }
+}
+
+/**
+ * amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
+ *
+ * @mgr: amdgpu_vram_mgr pointer
+ * @start: start address of the range in VRAM
+ * @size: size of the range
+ *
+ * Reserve memory from start address with the specified size in VRAM
*/
-static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
- unsigned long p_size)
+int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
+ uint64_t start, uint64_t size)
{
- struct amdgpu_vram_mgr *mgr;
+ struct amdgpu_vram_reservation *rsv;
- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
- if (!mgr)
+ rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
+ if (!rsv)
return -ENOMEM;
- drm_mm_init(&mgr->mm, 0, p_size);
- spin_lock_init(&mgr->lock);
- man->priv = mgr;
+ INIT_LIST_HEAD(&rsv->allocated);
+ INIT_LIST_HEAD(&rsv->blocks);
+
+ rsv->start = start;
+ rsv->size = size;
+
+ mutex_lock(&mgr->lock);
+ list_add_tail(&rsv->blocks, &mgr->reservations_pending);
+ amdgpu_vram_mgr_do_reserve(&mgr->manager);
+ mutex_unlock(&mgr->lock);
+
return 0;
}
/**
- * amdgpu_vram_mgr_fini - free and destroy VRAM manager
+ * amdgpu_vram_mgr_query_page_status - query the reservation status
*
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
+ * @start: start address of a page in VRAM
*
- * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
- * allocated inside it.
+ * Returns:
+ * -EBUSY: the page is still held and in the pending list
+ * 0: the page has been reserved
+ * -ENOENT: the input page is not a reservation
*/
-static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
+int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
+ uint64_t start)
{
- struct amdgpu_vram_mgr *mgr = man->priv;
+ struct amdgpu_vram_reservation *rsv;
+ int ret;
- spin_lock(&mgr->lock);
- if (!drm_mm_clean(&mgr->mm)) {
- spin_unlock(&mgr->lock);
- return -EBUSY;
+ mutex_lock(&mgr->lock);
+
+ list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
+ if (rsv->start <= start &&
+ (start < (rsv->start + rsv->size))) {
+ ret = -EBUSY;
+ goto out;
+ }
}
- drm_mm_takedown(&mgr->mm);
- spin_unlock(&mgr->lock);
- kfree(mgr);
- man->priv = NULL;
- return 0;
+ list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
+ if (rsv->start <= start &&
+ (start < (rsv->start + rsv->size))) {
+ ret = 0;
+ goto out;
+ }
+ }
+
+ ret = -ENOENT;
+out:
+ mutex_unlock(&mgr->lock);
+ return ret;
+}
+
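
Taken together, amdgpu_vram_mgr_reserve_range() and amdgpu_vram_mgr_query_page_status() let a caller carve a region out of the allocator and later check whether the reservation has been committed. A hedged in-kernel sketch of that pairing follows; the caller name and address are illustrative, not part of the patch.

    /*
     * Illustrative only: reserve one page at a known-bad VRAM offset and
     * poll its status, following the return codes documented above
     * (-EBUSY pending, 0 reserved, -ENOENT not a reservation).
     */
    static int example_reserve_bad_page(struct amdgpu_device *adev, u64 addr)
    {
            struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
            int r;

            r = amdgpu_vram_mgr_reserve_range(mgr, addr, PAGE_SIZE);
            if (r)
                    return r;

            r = amdgpu_vram_mgr_query_page_status(mgr, addr);
            if (r == -EBUSY)
                    dev_dbg(adev->dev, "reservation at 0x%llx still pending\n", addr);

            return r == -EBUSY ? 0 : r;
    }
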
+int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
+ uint64_t address, struct amdgpu_vram_block_info *info)
+{
+ struct amdgpu_vram_mgr_resource *vres;
+ struct drm_buddy_block *block;
+ u64 start, size;
+ int ret = -ENOENT;
+
+ mutex_lock(&mgr->lock);
+ list_for_each_entry(vres, &mgr->allocated_vres_list, vres_node) {
+ list_for_each_entry(block, &vres->blocks, link) {
+ start = amdgpu_vram_mgr_block_start(block);
+ size = amdgpu_vram_mgr_block_size(block);
+ if ((start <= address) && (address < (start + size))) {
+ info->start = start;
+ info->size = size;
+ memcpy(&info->task, &vres->task, sizeof(vres->task));
+ ret = 0;
+ goto out;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&mgr->lock);
+
+ return ret;
}
/**
@@ -84,145 +431,544 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
* @man: TTM memory type manager
* @tbo: TTM BO we need this range for
* @place: placement flags and restrictions
- * @mem: the resulting mem object
+ * @res: the resulting mem object
*
* Allocate VRAM for the given BO.
*/
-static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
+static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
struct ttm_buffer_object *tbo,
const struct ttm_place *place,
- struct ttm_mem_reg *mem)
-{
- struct amdgpu_vram_mgr *mgr = man->priv;
- struct drm_mm *mm = &mgr->mm;
- struct drm_mm_node *nodes;
- enum drm_mm_insert_mode mode;
- unsigned long lpfn, num_nodes, pages_per_node, pages_left;
- unsigned i;
+ struct ttm_resource **res)
+{
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ struct amdgpu_device *adev = to_amdgpu_device(mgr);
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ u64 vis_usage = 0, max_bytes, min_block_size;
+ struct amdgpu_vram_mgr_resource *vres;
+ u64 size, remaining_size, lpfn, fpfn;
+ unsigned int adjust_dcc_size = 0;
+ struct drm_buddy *mm = &mgr->mm;
+ struct drm_buddy_block *block;
+ unsigned long pages_per_block;
int r;
- lpfn = place->lpfn;
- if (!lpfn)
+ lpfn = (u64)place->lpfn << PAGE_SHIFT;
+ if (!lpfn || lpfn > man->size)
lpfn = man->size;
- if (place->flags & TTM_PL_FLAG_CONTIGUOUS ||
- amdgpu_vram_page_split == -1) {
- pages_per_node = ~0ul;
- num_nodes = 1;
+ fpfn = (u64)place->fpfn << PAGE_SHIFT;
+
+ max_bytes = adev->gmc.mc_vram_size;
+ if (tbo->type != ttm_bo_type_kernel)
+ max_bytes -= AMDGPU_VM_RESERVED_VRAM;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) {
+ pages_per_block = ~0ul;
} else {
- pages_per_node = max((uint32_t)amdgpu_vram_page_split,
- mem->page_alignment);
- num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ pages_per_block = HPAGE_PMD_NR;
+#else
+ /* default to 2MB */
+ pages_per_block = 2UL << (20UL - PAGE_SHIFT);
+#endif
+ pages_per_block = max_t(u32, pages_per_block,
+ tbo->page_alignment);
}
- nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
- if (!nodes)
+ vres = kzalloc(sizeof(*vres), GFP_KERNEL);
+ if (!vres)
return -ENOMEM;
- mode = DRM_MM_INSERT_BEST;
+ ttm_resource_init(tbo, place, &vres->base);
+
+ /* bail out quickly if there's likely not enough VRAM for this BO */
+ if (ttm_resource_manager_usage(man) > max_bytes) {
+ r = -ENOSPC;
+ goto error_fini;
+ }
+
+ INIT_LIST_HEAD(&vres->blocks);
+
if (place->flags & TTM_PL_FLAG_TOPDOWN)
- mode = DRM_MM_INSERT_HIGH;
+ vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
- mem->start = 0;
- pages_left = mem->num_pages;
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
- spin_lock(&mgr->lock);
- for (i = 0; i < num_nodes; ++i) {
- unsigned long pages = min(pages_left, pages_per_node);
- uint32_t alignment = mem->page_alignment;
- unsigned long start;
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED)
+ vres->flags |= DRM_BUDDY_CLEAR_ALLOCATION;
+
+ if (fpfn || lpfn != mgr->mm.size)
+ /* Allocate blocks in desired range */
+ vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
- if (pages == pages_per_node)
- alignment = pages_per_node;
+ if (bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC &&
+ adev->gmc.gmc_funcs->get_dcc_alignment)
+ adjust_dcc_size = amdgpu_gmc_get_dcc_alignment(adev);
+
+ remaining_size = (u64)vres->base.size;
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
+ unsigned int dcc_size;
+
+ dcc_size = roundup_pow_of_two(vres->base.size + adjust_dcc_size);
+ remaining_size = (u64)dcc_size;
+
+ vres->flags |= DRM_BUDDY_TRIM_DISABLE;
+ }
+
+ mutex_lock(&mgr->lock);
+ while (remaining_size) {
+ if (tbo->page_alignment)
+ min_block_size = (u64)tbo->page_alignment << PAGE_SHIFT;
+ else
+ min_block_size = mgr->default_page_size;
+
+ size = remaining_size;
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size)
+ min_block_size = size;
+ else if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
+ !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
+ min_block_size = (u64)pages_per_block << PAGE_SHIFT;
+
+ BUG_ON(min_block_size < mm->chunk_size);
+
+ r = drm_buddy_alloc_blocks(mm, fpfn,
+ lpfn,
+ size,
+ min_block_size,
+ &vres->blocks,
+ vres->flags);
+
+ if (unlikely(r == -ENOSPC) && pages_per_block == ~0ul &&
+ !(place->flags & TTM_PL_FLAG_CONTIGUOUS)) {
+ vres->flags &= ~DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+ pages_per_block = max_t(u32, 2UL << (20UL - PAGE_SHIFT),
+ tbo->page_alignment);
+
+ continue;
+ }
- r = drm_mm_insert_node_in_range(mm, &nodes[i],
- pages, alignment, 0,
- place->fpfn, lpfn,
- mode);
if (unlikely(r))
- goto error;
-
- /* Calculate a virtual BO start address to easily check if
- * everything is CPU accessible.
- */
- start = nodes[i].start + nodes[i].size;
- if (start > mem->num_pages)
- start -= mem->num_pages;
+ goto error_free_blocks;
+
+ if (size > remaining_size)
+ remaining_size = 0;
+ else
+ remaining_size -= size;
+ }
+
+ vres->task.pid = task_pid_nr(current);
+ get_task_comm(vres->task.comm, current);
+ list_add_tail(&vres->vres_node, &mgr->allocated_vres_list);
+
+ if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
+ struct drm_buddy_block *dcc_block;
+ unsigned long dcc_start;
+ u64 trim_start;
+
+ dcc_block = amdgpu_vram_mgr_first_block(&vres->blocks);
+ /* Adjust the start address for DCC buffers only */
+ dcc_start =
+ roundup((unsigned long)amdgpu_vram_mgr_block_start(dcc_block),
+ adjust_dcc_size);
+ trim_start = (u64)dcc_start;
+ drm_buddy_block_trim(mm, &trim_start,
+ (u64)vres->base.size,
+ &vres->blocks);
+ }
+ mutex_unlock(&mgr->lock);
+
+ vres->base.start = 0;
+ size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
+ vres->base.size);
+ list_for_each_entry(block, &vres->blocks, link) {
+ unsigned long start;
+
+ start = amdgpu_vram_mgr_block_start(block) +
+ amdgpu_vram_mgr_block_size(block);
+ start >>= PAGE_SHIFT;
+
+ if (start > PFN_UP(size))
+ start -= PFN_UP(size);
else
start = 0;
- mem->start = max(mem->start, start);
- pages_left -= pages;
+ vres->base.start = max(vres->base.start, start);
+
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
}
- spin_unlock(&mgr->lock);
- mem->mm_node = nodes;
+ if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
+ vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
+
+ if (adev->gmc.xgmi.connected_to_cpu)
+ vres->base.bus.caching = ttm_cached;
+ else
+ vres->base.bus.caching = ttm_write_combined;
+ atomic64_add(vis_usage, &mgr->vis_usage);
+ *res = &vres->base;
return 0;
-error:
- while (i--)
- drm_mm_remove_node(&nodes[i]);
- spin_unlock(&mgr->lock);
+error_free_blocks:
+ drm_buddy_free_list(mm, &vres->blocks, 0);
+ mutex_unlock(&mgr->lock);
+error_fini:
+ ttm_resource_fini(man, &vres->base);
+ kfree(vres);
- kfree(nodes);
- return r == -ENOSPC ? 0 : r;
+ return r;
}
/**
* amdgpu_vram_mgr_del - free ranges
*
* @man: TTM memory type manager
- * @tbo: TTM BO we need this range for
- * @place: placement flags and restrictions
- * @mem: TTM memory object
+ * @res: TTM memory object
*
* Free the allocated VRAM again.
*/
-static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
- struct ttm_mem_reg *mem)
+static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
{
- struct amdgpu_vram_mgr *mgr = man->priv;
- struct drm_mm_node *nodes = mem->mm_node;
- unsigned pages = mem->num_pages;
+ struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ struct amdgpu_device *adev = to_amdgpu_device(mgr);
+ struct drm_buddy *mm = &mgr->mm;
+ struct drm_buddy_block *block;
+ uint64_t vis_usage = 0;
- if (!mem->mm_node)
- return;
+ mutex_lock(&mgr->lock);
+
+ list_del(&vres->vres_node);
+ memset(&vres->task, 0, sizeof(vres->task));
+
+ list_for_each_entry(block, &vres->blocks, link)
+ vis_usage += amdgpu_vram_mgr_vis_size(adev, block);
+
+ drm_buddy_free_list(mm, &vres->blocks, vres->flags);
+ amdgpu_vram_mgr_do_reserve(man);
+ mutex_unlock(&mgr->lock);
+
+ atomic64_sub(vis_usage, &mgr->vis_usage);
+
+ ttm_resource_fini(man, res);
+ kfree(vres);
+}
+
+/**
+ * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
+ *
+ * @adev: amdgpu device pointer
+ * @res: TTM memory object
+ * @offset: byte offset from the base of VRAM BO
+ * @length: number of bytes to export in sg_table
+ * @dev: the other device
+ * @dir: dma direction
+ * @sgt: resulting sg table
+ *
+ * Allocate and fill a sg table from a VRAM allocation.
+ */
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+ struct ttm_resource *res,
+ u64 offset, u64 length,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table **sgt)
+{
+ struct amdgpu_res_cursor cursor;
+ struct scatterlist *sg;
+ int num_entries = 0;
+ int i, r;
+
+ *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
+ if (!*sgt)
+ return -ENOMEM;
+
+ /* Determine the number of DRM_BUDDY blocks to export */
+ amdgpu_res_first(res, offset, length, &cursor);
+ while (cursor.remaining) {
+ num_entries++;
+ amdgpu_res_next(&cursor, min(cursor.size, AMDGPU_MAX_SG_SEGMENT_SIZE));
+ }
+
+ r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
+ if (r)
+ goto error_free;
+
+ /* Initialize scatterlist nodes of sg_table */
+ for_each_sgtable_sg((*sgt), sg, i)
+ sg->length = 0;
+
+ /*
+ * Walk down DRM_BUDDY blocks to populate scatterlist nodes
+ * @note: Use the iterator API to get the first DRM_BUDDY block
+ * and the number of bytes from it. Access the following
+ * DRM_BUDDY block(s) if more of the buffer needs to be exported.
+ */
+ amdgpu_res_first(res, offset, length, &cursor);
+ for_each_sgtable_sg((*sgt), sg, i) {
+ phys_addr_t phys = cursor.start + adev->gmc.aper_base;
+ unsigned long size = min(cursor.size, AMDGPU_MAX_SG_SEGMENT_SIZE);
+ dma_addr_t addr;
+
+ addr = dma_map_resource(dev, phys, size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ r = dma_mapping_error(dev, addr);
+ if (r)
+ goto error_unmap;
+
+ sg_set_page(sg, NULL, size, 0);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = size;
+
+ amdgpu_res_next(&cursor, size);
+ }
+
+ return 0;
+
+error_unmap:
+ for_each_sgtable_sg((*sgt), sg, i) {
+ if (!sg->length)
+ continue;
- spin_lock(&mgr->lock);
- while (pages) {
- pages -= nodes->size;
- drm_mm_remove_node(nodes);
- ++nodes;
+ dma_unmap_resource(dev, sg->dma_address,
+ sg->length, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
}
- spin_unlock(&mgr->lock);
+ sg_free_table(*sgt);
- kfree(mem->mm_node);
- mem->mm_node = NULL;
+error_free:
+ kfree(*sgt);
+ return r;
+}
+
+/**
+ * amdgpu_vram_mgr_free_sgt - free a previously allocated sg table
+ *
+ * @dev: device pointer
+ * @dir: data direction of resource to unmap
+ * @sgt: sg table to free
+ *
+ * Free a previously allocated sg table.
+ */
+void amdgpu_vram_mgr_free_sgt(struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table *sgt)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sgtable_sg(sgt, sg, i)
+ dma_unmap_resource(dev, sg->dma_address,
+ sg->length, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
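
amdgpu_vram_mgr_alloc_sgt() and amdgpu_vram_mgr_free_sgt() are intended to be used as a pair, for example when exposing a VRAM allocation to a peer device. A minimal sketch of that pairing, with the surrounding caller assumed rather than taken from the patch:

    /*
     * Illustrative pairing of the two helpers above: map the whole resource
     * for a peer device, use it, then unmap and free the table.
     */
    static int example_export_vram(struct amdgpu_device *adev,
                                   struct ttm_resource *res, u64 size,
                                   struct device *peer)
    {
            struct sg_table *sgt;
            int r;

            r = amdgpu_vram_mgr_alloc_sgt(adev, res, 0, size, peer,
                                          DMA_BIDIRECTIONAL, &sgt);
            if (r)
                    return r;

            /* ... hand sgt to the peer device here ... */

            amdgpu_vram_mgr_free_sgt(peer, DMA_BIDIRECTIONAL, sgt);
            return 0;
    }
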
+/**
+ * amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
+ *
+ * @mgr: amdgpu_vram_mgr pointer
+ *
+ * Returns how many bytes are used in the visible part of VRAM
+ */
+uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
+{
+ return atomic64_read(&mgr->vis_usage);
+}
+
+/**
+ * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Reset the cleared drm buddy blocks.
+ */
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
+{
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ struct drm_buddy *mm = &mgr->mm;
+
+ mutex_lock(&mgr->lock);
+ drm_buddy_reset_clear(mm, false);
+ mutex_unlock(&mgr->lock);
+}
+
+/**
+ * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
+ *
+ * @man: TTM memory type manager
+ * @res: The resource to test
+ * @place: The place to test against
+ * @size: Size of the new allocation
+ *
+ * Test each drm buddy block for intersection for eviction decision.
+ */
+static bool amdgpu_vram_mgr_intersects(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
+ struct drm_buddy_block *block;
+
+ /* Check each drm buddy block individually */
+ list_for_each_entry(block, &mgr->blocks, link) {
+ unsigned long fpfn =
+ amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
+ unsigned long lpfn = fpfn +
+ (amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);
+
+ if (place->fpfn < lpfn &&
+ (!place->lpfn || place->lpfn > fpfn))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * amdgpu_vram_mgr_compatible - test each drm buddy block for compatibility
+ *
+ * @man: TTM memory type manager
+ * @res: The resource to test
+ * @place: The place to test against
+ * @size: Size of the new allocation
+ *
+ * Test each drm buddy block for placement compatibility.
+ */
+static bool amdgpu_vram_mgr_compatible(struct ttm_resource_manager *man,
+ struct ttm_resource *res,
+ const struct ttm_place *place,
+ size_t size)
+{
+ struct amdgpu_vram_mgr_resource *mgr = to_amdgpu_vram_mgr_resource(res);
+ struct drm_buddy_block *block;
+
+ /* Check each drm buddy block individually */
+ list_for_each_entry(block, &mgr->blocks, link) {
+ unsigned long fpfn =
+ amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;
+ unsigned long lpfn = fpfn +
+ (amdgpu_vram_mgr_block_size(block) >> PAGE_SHIFT);
+
+ if (fpfn < place->fpfn ||
+ (place->lpfn && lpfn > place->lpfn))
+ return false;
+ }
+
+ return true;
}
/**
* amdgpu_vram_mgr_debug - dump VRAM table
*
* @man: TTM memory type manager
- * @prefix: text prefix
+ * @printer: DRM printer to use
*
* Dump the table content using printk.
*/
-static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man,
- const char *prefix)
+static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
+ struct drm_printer *printer)
{
- struct amdgpu_vram_mgr *mgr = man->priv;
- struct drm_printer p = drm_debug_printer(prefix);
+ struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
+ struct drm_buddy *mm = &mgr->mm;
+ struct amdgpu_vram_reservation *rsv;
+
+ drm_printf(printer, " vis usage:%llu\n",
+ amdgpu_vram_mgr_vis_usage(mgr));
+
+ mutex_lock(&mgr->lock);
+ drm_printf(printer, "default_page_size: %lluKiB\n",
+ mgr->default_page_size >> 10);
- spin_lock(&mgr->lock);
- drm_mm_print(&mgr->mm, &p);
- spin_unlock(&mgr->lock);
+ drm_buddy_print(mm, printer);
+
+ drm_printf(printer, "reserved:\n");
+ list_for_each_entry(rsv, &mgr->reserved_pages, blocks)
+ drm_printf(printer, "%#018llx-%#018llx: %llu\n",
+ rsv->start, rsv->start + rsv->size, rsv->size);
+ mutex_unlock(&mgr->lock);
}
-const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = {
- amdgpu_vram_mgr_init,
- amdgpu_vram_mgr_fini,
- amdgpu_vram_mgr_new,
- amdgpu_vram_mgr_del,
- amdgpu_vram_mgr_debug
+static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
+ .alloc = amdgpu_vram_mgr_new,
+ .free = amdgpu_vram_mgr_del,
+ .intersects = amdgpu_vram_mgr_intersects,
+ .compatible = amdgpu_vram_mgr_compatible,
+ .debug = amdgpu_vram_mgr_debug
};
+
+/**
+ * amdgpu_vram_mgr_init - init VRAM manager and DRM MM
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate and initialize the VRAM manager.
+ */
+int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
+ int err;
+
+ man->cg = drmm_cgroup_register_region(adev_to_drm(adev), "vram", adev->gmc.real_vram_size);
+ if (IS_ERR(man->cg))
+ return PTR_ERR(man->cg);
+ ttm_resource_manager_init(man, &adev->mman.bdev,
+ adev->gmc.real_vram_size);
+
+ mutex_init(&mgr->lock);
+ INIT_LIST_HEAD(&mgr->reservations_pending);
+ INIT_LIST_HEAD(&mgr->reserved_pages);
+ INIT_LIST_HEAD(&mgr->allocated_vres_list);
+ mgr->default_page_size = PAGE_SIZE;
+
+ man->func = &amdgpu_vram_mgr_func;
+ err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
+ if (err)
+ return err;
+
+ ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
+ ttm_resource_manager_set_used(man, true);
+ return 0;
+}
+
+/**
+ * amdgpu_vram_mgr_fini - free and destroy VRAM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Destroy and free the VRAM manager, returns -EBUSY if ranges are still
+ * allocated inside it.
+ */
+void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+ struct ttm_resource_manager *man = &mgr->manager;
+ int ret;
+ struct amdgpu_vram_reservation *rsv, *temp;
+
+ ttm_resource_manager_set_used(man, false);
+
+ ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+ if (ret)
+ return;
+
+ mutex_lock(&mgr->lock);
+ list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
+ kfree(rsv);
+
+ list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
+ drm_buddy_free_list(&mgr->mm, &rsv->allocated, 0);
+ kfree(rsv);
+ }
+ if (!adev->gmc.is_app_apu)
+ drm_buddy_fini(&mgr->mm);
+ mutex_unlock(&mgr->lock);
+
+ ttm_resource_manager_cleanup(man);
+ ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
new file mode 100644
index 000000000000..5f5fd9a911c2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_VRAM_MGR_H__
+#define __AMDGPU_VRAM_MGR_H__
+
+#include <drm/drm_buddy.h>
+
+struct amdgpu_vram_mgr {
+ struct ttm_resource_manager manager;
+ struct drm_buddy mm;
+ /* protects access to buffer objects */
+ struct mutex lock;
+ struct list_head reservations_pending;
+ struct list_head reserved_pages;
+ atomic64_t vis_usage;
+ u64 default_page_size;
+ struct list_head allocated_vres_list;
+};
+
+struct amdgpu_vres_task {
+ pid_t pid;
+ char comm[TASK_COMM_LEN];
+};
+
+struct amdgpu_vram_block_info {
+ u64 start;
+ u64 size;
+ struct amdgpu_vres_task task;
+};
+
+struct amdgpu_vram_mgr_resource {
+ struct ttm_resource base;
+ struct list_head blocks;
+ unsigned long flags;
+ struct list_head vres_node;
+ struct amdgpu_vres_task task;
+};
+
+static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_offset(block);
+}
+
+static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
+{
+ return (u64)PAGE_SIZE << drm_buddy_block_order(block);
+}
+
+static inline bool amdgpu_vram_mgr_is_cleared(struct drm_buddy_block *block)
+{
+ return drm_buddy_block_is_clear(block);
+}
+
+static inline struct amdgpu_vram_mgr_resource *
+to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
+{
+ return container_of(res, struct amdgpu_vram_mgr_resource, base);
+}
+
+static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res)
+{
+ struct amdgpu_vram_mgr_resource *ares = to_amdgpu_vram_mgr_resource(res);
+
+ WARN_ON(ares->flags & DRM_BUDDY_CLEARED);
+ ares->flags |= DRM_BUDDY_CLEARED;
+}
+
+int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr,
+ uint64_t address, struct amdgpu_vram_block_info *info);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
new file mode 100644
index 000000000000..1083db8cea2e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -0,0 +1,1107 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_xcp.h"
+#include "amdgpu_drv.h"
+
+#include <drm/drm_drv.h>
+#include "../amdxcp/amdgpu_xcp_drv.h"
+
+static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr);
+static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr);
+
+static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
+ struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
+{
+ int (*run_func)(void *handle, uint32_t inst_mask);
+ int ret = 0;
+
+ if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
+ return 0;
+
+ run_func = NULL;
+
+ switch (xcp_state) {
+ case AMDGPU_XCP_PREPARE_SUSPEND:
+ run_func = xcp_ip->ip_funcs->prepare_suspend;
+ break;
+ case AMDGPU_XCP_SUSPEND:
+ run_func = xcp_ip->ip_funcs->suspend;
+ break;
+ case AMDGPU_XCP_PREPARE_RESUME:
+ run_func = xcp_ip->ip_funcs->prepare_resume;
+ break;
+ case AMDGPU_XCP_RESUME:
+ run_func = xcp_ip->ip_funcs->resume;
+ break;
+ }
+
+ if (run_func)
+ ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);
+
+ return ret;
+}
+
+static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
+ int state)
+{
+ struct amdgpu_xcp_ip *xcp_ip;
+ struct amdgpu_xcp *xcp;
+ int i, ret;
+
+ if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
+ return -EINVAL;
+
+ xcp = &xcp_mgr->xcp[xcp_id];
+ for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
+ xcp_ip = &xcp->ip[i];
+ ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
+{
+ return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
+ AMDGPU_XCP_PREPARE_SUSPEND);
+}
+
+int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
+{
+ return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
+}
+
+int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
+{
+ return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
+ AMDGPU_XCP_PREPARE_RESUME);
+}
+
+int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
+{
+ return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
+}
+
+static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
+ struct amdgpu_xcp_ip *ip)
+{
+ struct amdgpu_xcp *xcp;
+
+ if (!ip)
+ return;
+
+ xcp = &xcp_mgr->xcp[xcp_id];
+ xcp->ip[ip->ip_id] = *ip;
+ xcp->ip[ip->ip_id].valid = true;
+
+ xcp->valid = true;
+}
+
+static void __amdgpu_xcp_set_unique_id(struct amdgpu_xcp_mgr *xcp_mgr,
+ int xcp_id)
+{
+ struct amdgpu_xcp *xcp = &xcp_mgr->xcp[xcp_id];
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ uint32_t inst_mask;
+ uint64_t uid;
+ int i;
+
+ if (!amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &inst_mask) &&
+ inst_mask) {
+ i = GET_INST(GC, (ffs(inst_mask) - 1));
+ uid = amdgpu_device_get_uid(xcp_mgr->adev->uid_info,
+ AMDGPU_UID_TYPE_XCD, i);
+ if (uid)
+ xcp->unique_id = uid;
+ }
+}
+
+int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ struct amdgpu_xcp_ip ip;
+ uint8_t mem_id;
+ int i, j, ret;
+
+ if (!num_xcps || num_xcps > MAX_XCP)
+ return -EINVAL;
+
+ xcp_mgr->mode = mode;
+
+ for (i = 0; i < MAX_XCP; ++i)
+ xcp_mgr->xcp[i].valid = false;
+
+ /* This is needed for figuring out memory id of xcp */
+ xcp_mgr->num_xcp_per_mem_partition = num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;
+
+ for (i = 0; i < num_xcps; ++i) {
+ for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
+ ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
+ &ip);
+ if (ret)
+ continue;
+
+ __amdgpu_xcp_add_block(xcp_mgr, i, &ip);
+ }
+
+ xcp_mgr->xcp[i].id = i;
+
+ if (xcp_mgr->funcs->get_xcp_mem_id) {
+ ret = xcp_mgr->funcs->get_xcp_mem_id(
+ xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
+ if (ret)
+ continue;
+ else
+ xcp_mgr->xcp[i].mem_id = mem_id;
+ }
+ __amdgpu_xcp_set_unique_id(xcp_mgr, i);
+ }
+
+ xcp_mgr->num_xcps = num_xcps;
+ amdgpu_xcp_update_partition_sched_list(adev);
+
+ return 0;
+}
+
+static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode)
+{
+ int ret, curr_mode, num_xcps = 0;
+
+ if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
+ return 0;
+
+ mutex_lock(&xcp_mgr->xcp_lock);
+
+ curr_mode = xcp_mgr->mode;
+ /* State set to transient mode */
+ xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;
+
+ ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);
+
+ if (ret) {
+ /* Failed, get whatever mode it's at now */
+ if (xcp_mgr->funcs->query_partition_mode)
+ xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
+ xcp_mgr, AMDGPU_XCP_FL_LOCKED);
+ else
+ xcp_mgr->mode = curr_mode;
+
+ goto out;
+ }
+ amdgpu_xcp_sysfs_entries_update(xcp_mgr);
+out:
+ mutex_unlock(&xcp_mgr->xcp_lock);
+
+ return ret;
+}
+
+int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
+{
+ if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
+ return -EINVAL;
+
+ if (xcp_mgr->mode == mode)
+ return 0;
+
+ return __amdgpu_xcp_switch_partition_mode(xcp_mgr, mode);
+}
+
+int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ if (!xcp_mgr || xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+ return 0;
+
+ return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
+}
+
+static bool __amdgpu_xcp_is_cached_mode_valid(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
+ return true;
+
+ if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
+ xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+ return true;
+
+ if (xcp_mgr->mode != AMDGPU_XCP_MODE_NONE &&
+ xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS)
+ return true;
+
+ return false;
+}
+
+int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ int mode;
+
+ if (__amdgpu_xcp_is_cached_mode_valid(xcp_mgr))
+ return xcp_mgr->mode;
+
+ if (!(flags & AMDGPU_XCP_FL_LOCKED))
+ mutex_lock(&xcp_mgr->xcp_lock);
+ mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);
+
+ /* First time query for VF, set the mode here */
+ if (amdgpu_sriov_vf(xcp_mgr->adev) &&
+ xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+ xcp_mgr->mode = mode;
+
+ if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
+ dev_WARN(
+ xcp_mgr->adev->dev,
+ "Cached partition mode %d not matching with device mode %d",
+ xcp_mgr->mode, mode);
+
+ if (!(flags & AMDGPU_XCP_FL_LOCKED))
+ mutex_unlock(&xcp_mgr->xcp_lock);
+
+ return mode;
+}
+
+static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
+{
+ struct drm_device *p_ddev;
+ struct drm_device *ddev;
+ int i, ret;
+
+ ddev = adev_to_drm(adev);
+
+ /* xcp #0 shares drm device setting with adev */
+ adev->xcp_mgr->xcp->ddev = ddev;
+
+ for (i = 1; i < MAX_XCP; i++) {
+ ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
+ if (ret == -ENOSPC) {
+ dev_warn(adev->dev,
+ "Skip xcp node #%d when out of drm node resource.", i);
+ ret = 0;
+ goto out;
+ } else if (ret) {
+ goto out;
+ }
+
+ /* Redirect all IOCTLs to the primary device */
+ adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
+ adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev;
+ adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver;
+ adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager;
+ p_ddev->render->dev = ddev;
+ p_ddev->primary->dev = ddev;
+ p_ddev->vma_offset_manager = ddev->vma_offset_manager;
+ p_ddev->driver = &amdgpu_partition_driver;
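+ /*
+ * The partition node's render/primary minors now point back at the
+ * primary drm_device, so IOCTLs issued on the partition nodes are
+ * handled by adev. The original pointers were saved in the xcp above
+ * and are restored in amdgpu_xcp_dev_unplug() before the device is
+ * freed.
+ */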
+ adev->xcp_mgr->xcp[i].ddev = p_ddev;
+
+ dev_set_drvdata(p_ddev->dev, &adev->xcp_mgr->xcp[i]);
+ }
+ ret = 0;
+out:
+ amdgpu_xcp_sysfs_entries_init(adev->xcp_mgr);
+
+ return ret;
+}
+
+int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
+ int init_num_xcps,
+ struct amdgpu_xcp_mgr_funcs *xcp_funcs)
+{
+ struct amdgpu_xcp_mgr *xcp_mgr;
+ int i;
+
+ if (!xcp_funcs || !xcp_funcs->get_ip_details)
+ return -EINVAL;
+
+ xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);
+
+ if (!xcp_mgr)
+ return -ENOMEM;
+
+ xcp_mgr->adev = adev;
+ xcp_mgr->funcs = xcp_funcs;
+ xcp_mgr->mode = init_mode;
+ mutex_init(&xcp_mgr->xcp_lock);
+
+ if (init_mode != AMDGPU_XCP_MODE_NONE)
+ amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);
+
+ adev->xcp_mgr = xcp_mgr;
+ for (i = 0; i < MAX_XCP; ++i)
+ xcp_mgr->xcp[i].xcp_mgr = xcp_mgr;
+
+ return amdgpu_xcp_dev_alloc(adev);
+}
+
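+/*
+ * Returns a bitmask of partition ids whose given IP block contains the
+ * requested instance, or -ENXIO if no partition matches. For example
+ * (illustrative), if SDMA instance 2 belongs only to partition 1, the
+ * returned mask is BIT(1) = 0x2.
+ */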
+int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
+ enum AMDGPU_XCP_IP_BLOCK ip, int instance)
+{
+ struct amdgpu_xcp *xcp;
+ int i, id_mask = 0;
+
+ if (ip >= AMDGPU_XCP_MAX_BLOCKS)
+ return -EINVAL;
+
+ for (i = 0; i < xcp_mgr->num_xcps; ++i) {
+ xcp = &xcp_mgr->xcp[i];
+ if ((xcp->valid) && (xcp->ip[ip].valid) &&
+ (xcp->ip[ip].inst_mask & BIT(instance)))
+ id_mask |= BIT(i);
+ }
+
+ if (!id_mask)
+ id_mask = -ENXIO;
+
+ return id_mask;
+}
+
+int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
+ enum AMDGPU_XCP_IP_BLOCK ip,
+ uint32_t *inst_mask)
+{
+ if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid))
+ return -EINVAL;
+
+ *inst_mask = xcp->ip[ip].inst_mask;
+
+ return 0;
+}
+
+int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
+ const struct pci_device_id *ent)
+{
+ int i, ret;
+
+ if (!adev->xcp_mgr)
+ return 0;
+
+ for (i = 1; i < MAX_XCP; i++) {
+ if (!adev->xcp_mgr->xcp[i].ddev)
+ break;
+
+ ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
+{
+ struct drm_device *p_ddev;
+ int i;
+
+ if (!adev->xcp_mgr)
+ return;
+
+ for (i = 1; i < MAX_XCP; i++) {
+ if (!adev->xcp_mgr->xcp[i].ddev)
+ break;
+
+ p_ddev = adev->xcp_mgr->xcp[i].ddev;
+ drm_dev_unplug(p_ddev);
+ p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
+ p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
+ p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
+ p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
+ amdgpu_xcp_drm_dev_free(p_ddev);
+ }
+}
+
+int amdgpu_xcp_open_device(struct amdgpu_device *adev,
+ struct amdgpu_fpriv *fpriv,
+ struct drm_file *file_priv)
+{
+ int i;
+
+ if (!adev->xcp_mgr)
+ return 0;
+
+ fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
+ for (i = 0; i < MAX_XCP; ++i) {
+ if (!adev->xcp_mgr->xcp[i].ddev)
+ break;
+
+ if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
+ if (!adev->xcp_mgr->xcp[i].valid) {
+ dev_err(adev->dev, "renderD%d partition %d not valid!",
+ file_priv->minor->index, i);
+ return -ENOENT;
+ }
+ dev_dbg(adev->dev, "renderD%d partition %d opened!",
+ file_priv->minor->index, i);
+ fpriv->xcp_id = i;
+ break;
+ }
+ }
+
+ fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
+ adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
+ return 0;
+}
+
+void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
+ struct amdgpu_ctx_entity *entity)
+{
+ struct drm_gpu_scheduler *sched;
+ struct amdgpu_ring *ring;
+
+ if (!adev->xcp_mgr)
+ return;
+
+ sched = entity->entity.rq->sched;
+ if (drm_sched_wqueue_ready(sched)) {
+ ring = to_amdgpu_ring(entity->entity.rq->sched);
+ atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
+ }
+}
+
+int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
+ u32 hw_ip, u32 hw_prio,
+ struct amdgpu_fpriv *fpriv,
+ unsigned int *num_scheds,
+ struct drm_gpu_scheduler ***scheds)
+{
+ u32 sel_xcp_id;
+ int i;
+ struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr;
+
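+ /*
+ * If the file did not open a partition-specific render node, spread
+ * work by picking the partition with the fewest active references.
+ */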
+ if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
+ u32 least_ref_cnt = ~0;
+
+ fpriv->xcp_id = 0;
+ for (i = 0; i < xcp_mgr->num_xcps; i++) {
+ u32 total_ref_cnt;
+
+ total_ref_cnt = atomic_read(&xcp_mgr->xcp[i].ref_cnt);
+ if (total_ref_cnt < least_ref_cnt) {
+ fpriv->xcp_id = i;
+ least_ref_cnt = total_ref_cnt;
+ }
+ }
+ }
+ sel_xcp_id = fpriv->xcp_id;
+
+ if (xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) {
+ *num_scheds =
+ xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds;
+ *scheds =
+ xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched;
+ atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt);
+ dev_dbg(adev->dev, "Selected partition #%d", sel_xcp_id);
+ } else {
+ dev_err(adev->dev, "Failed to schedule partition #%d.", sel_xcp_id);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static void amdgpu_set_xcp_id(struct amdgpu_device *adev,
+ uint32_t inst_idx,
+ struct amdgpu_ring *ring)
+{
+ int xcp_id;
+ enum AMDGPU_XCP_IP_BLOCK ip_blk;
+ uint32_t inst_mask;
+
+ ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id;
+ if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
+ (ring->funcs->type == AMDGPU_RING_TYPE_CPER))
+ return;
+
+ inst_mask = 1 << inst_idx;
+
+ switch (ring->funcs->type) {
+ case AMDGPU_RING_TYPE_GFX:
+ case AMDGPU_RING_TYPE_COMPUTE:
+ case AMDGPU_RING_TYPE_KIQ:
+ ip_blk = AMDGPU_XCP_GFX;
+ break;
+ case AMDGPU_RING_TYPE_SDMA:
+ ip_blk = AMDGPU_XCP_SDMA;
+ break;
+ case AMDGPU_RING_TYPE_VCN_ENC:
+ case AMDGPU_RING_TYPE_VCN_JPEG:
+ ip_blk = AMDGPU_XCP_VCN;
+ break;
+ default:
+ dev_err(adev->dev, "Not support ring type %d!", ring->funcs->type);
+ return;
+ }
+
+ for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) {
+ if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) {
+ ring->xcp_id = xcp_id;
+ dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name,
+ ring->xcp_id);
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
+ adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id;
+ break;
+ }
+ }
+}
+
+static void amdgpu_xcp_gpu_sched_update(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring,
+ unsigned int sel_xcp_id)
+{
+ unsigned int *num_gpu_sched;
+
+ num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id]
+ .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds;
+ adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio]
+ .sched[(*num_gpu_sched)++] = &ring->sched;
+ dev_dbg(adev->dev, "%s :[%d] gpu_sched[%d][%d] = %d",
+ ring->name, sel_xcp_id, ring->funcs->type,
+ ring->hw_prio, *num_gpu_sched);
+}
+
+static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ int i;
+
+ for (i = 0; i < MAX_XCP; i++) {
+ atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
+ memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched));
+ }
+
+ if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+ return 0;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ ring = adev->rings[i];
+ if (!ring || !ring->sched.ready || ring->no_scheduler)
+ continue;
+
+ amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id);
+
+ /* VCN may be shared by two partitions under CPX MODE in certain
+ * configs.
+ */
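+ /*
+ * Illustrative example (numbers assumed): 8 XCPs in CPX mode with only
+ * 4 VCN instances means each VCN ring also serves the neighbouring
+ * partition, hence the extra sched list entry at xcp_id + 1 below.
+ */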
+ if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
+ (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst))
+ amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
+ }
+
+ return 0;
+}
+
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev)
+{
+ int i;
+
+ for (i = 0; i < adev->num_rings; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ||
+ ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+ amdgpu_set_xcp_id(adev, ring->xcc_id, ring);
+ else
+ amdgpu_set_xcp_id(adev, ring->me, ring);
+ }
+
+ return amdgpu_xcp_sched_list_update(adev);
+}
+
+void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+
+ xcp_mgr->supp_xcp_modes = 0;
+
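+ /*
+ * The supported compute partition modes follow from the XCC count;
+ * e.g. with 4 XCCs the device can run as one partition (SPX), two
+ * partitions (DPX) or one partition per XCC (CPX).
+ */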
+ switch (NUM_XCC(adev->gfx.xcc_mask)) {
+ case 8:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_QPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 6:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_TPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 4:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_DPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 2:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+ case 1:
+ xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) |
+ BIT(AMDGPU_CPX_PARTITION_MODE);
+ break;
+
+ default:
+ break;
+ }
+}
+
+int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ /* TODO:
+ * Stop user queues and threads, and make sure GPU is empty of work.
+ */
+
+ if (flags & AMDGPU_XCP_OPS_KFD)
+ amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);
+
+ return 0;
+}
+
+int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
+{
+ int ret = 0;
+
+ if (flags & AMDGPU_XCP_OPS_KFD) {
+ amdgpu_amdkfd_device_probe(xcp_mgr->adev);
+ amdgpu_amdkfd_device_init(xcp_mgr->adev);
+ /* If KFD init failed, return failure */
+ if (!xcp_mgr->adev->kfd.init_complete)
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+/*====================== xcp sysfs - configuration ======================*/
+#define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \
+ static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \
+ struct amdgpu_xcp_res_details *xcp_res, char *buf) \
+ { \
+ return sysfs_emit(buf, "%d\n", xcp_res->_name); \
+ }
+
+struct amdgpu_xcp_res_sysfs_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct amdgpu_xcp_res_details *xcp_res, char *buf);
+};
+
+#define XCP_CFG_SYSFS_RES_ATTR(_name) \
+ struct amdgpu_xcp_res_sysfs_attribute xcp_res_sysfs_attr_##_name = { \
+ .attr = { .name = __stringify(_name), .mode = 0400 }, \
+ .show = amdgpu_xcp_res_sysfs_##_name##_show, \
+ }
+
+XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst)
+XCP_CFG_SYSFS_RES_ATTR(num_inst);
+XCP_CFG_SYSFS_RES_ATTR_SHOW(num_shared)
+XCP_CFG_SYSFS_RES_ATTR(num_shared);
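+/*
+ * For reference, XCP_CFG_SYSFS_RES_ATTR_SHOW(num_inst) above expands to a
+ * show handler roughly equivalent to:
+ *
+ *	static ssize_t amdgpu_xcp_res_sysfs_num_inst_show(
+ *		struct amdgpu_xcp_res_details *xcp_res, char *buf)
+ *	{
+ *		return sysfs_emit(buf, "%d\n", xcp_res->num_inst);
+ *	}
+ *
+ * while XCP_CFG_SYSFS_RES_ATTR(num_inst) defines the matching read-only
+ * (0400) attribute named "num_inst".
+ */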
+
+#define XCP_CFG_SYSFS_RES_ATTR_PTR(_name) xcp_res_sysfs_attr_##_name.attr
+
+static struct attribute *xcp_cfg_res_sysfs_attrs[] = {
+ &XCP_CFG_SYSFS_RES_ATTR_PTR(num_inst),
+ &XCP_CFG_SYSFS_RES_ATTR_PTR(num_shared), NULL
+};
+
+static const char *xcp_desc[] = {
+ [AMDGPU_SPX_PARTITION_MODE] = "SPX",
+ [AMDGPU_DPX_PARTITION_MODE] = "DPX",
+ [AMDGPU_TPX_PARTITION_MODE] = "TPX",
+ [AMDGPU_QPX_PARTITION_MODE] = "QPX",
+ [AMDGPU_CPX_PARTITION_MODE] = "CPX",
+};
+
+static const char *nps_desc[] = {
+ [UNKNOWN_MEMORY_PARTITION_MODE] = "UNKNOWN",
+ [AMDGPU_NPS1_PARTITION_MODE] = "NPS1",
+ [AMDGPU_NPS2_PARTITION_MODE] = "NPS2",
+ [AMDGPU_NPS3_PARTITION_MODE] = "NPS3",
+ [AMDGPU_NPS4_PARTITION_MODE] = "NPS4",
+ [AMDGPU_NPS6_PARTITION_MODE] = "NPS6",
+ [AMDGPU_NPS8_PARTITION_MODE] = "NPS8",
+};
+
+ATTRIBUTE_GROUPS(xcp_cfg_res_sysfs);
+
+#define to_xcp_attr(x) \
+ container_of(x, struct amdgpu_xcp_res_sysfs_attribute, attr)
+#define to_xcp_res(x) container_of(x, struct amdgpu_xcp_res_details, kobj)
+
+static ssize_t xcp_cfg_res_sysfs_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct amdgpu_xcp_res_sysfs_attribute *attribute;
+ struct amdgpu_xcp_res_details *xcp_res;
+
+ attribute = to_xcp_attr(attr);
+ xcp_res = to_xcp_res(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(xcp_res, buf);
+}
+
+static const struct sysfs_ops xcp_cfg_res_sysfs_ops = {
+ .show = xcp_cfg_res_sysfs_attr_show,
+};
+
+static const struct kobj_type xcp_cfg_res_sysfs_ktype = {
+ .sysfs_ops = &xcp_cfg_res_sysfs_ops,
+ .default_groups = xcp_cfg_res_sysfs_groups,
+};
+
+const char *xcp_res_names[] = {
+ [AMDGPU_XCP_RES_XCC] = "xcc",
+ [AMDGPU_XCP_RES_DMA] = "dma",
+ [AMDGPU_XCP_RES_DEC] = "dec",
+ [AMDGPU_XCP_RES_JPEG] = "jpeg",
+};
+
+static int amdgpu_xcp_get_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode,
+ struct amdgpu_xcp_cfg *xcp_cfg)
+{
+ if (xcp_mgr->funcs && xcp_mgr->funcs->get_xcp_res_info)
+ return xcp_mgr->funcs->get_xcp_res_info(xcp_mgr, mode, xcp_cfg);
+
+ return -EOPNOTSUPP;
+}
+
+#define to_xcp_cfg(x) container_of(x, struct amdgpu_xcp_cfg, kobj)
+static ssize_t supported_xcp_configs_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
+ struct amdgpu_xcp_mgr *xcp_mgr = xcp_cfg->xcp_mgr;
+ int size = 0, mode;
+ char *sep = "";
+
+ if (!xcp_mgr || !xcp_mgr->supp_xcp_modes)
+ return sysfs_emit(buf, "Not supported\n");
+
+ for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
+ size += sysfs_emit_at(buf, size, "%s%s", sep, xcp_desc[mode]);
+ sep = ", ";
+ }
+
+ size += sysfs_emit_at(buf, size, "\n");
+
+ return size;
+}
+
+static ssize_t supported_nps_configs_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
+ int size = 0, mode;
+ char *sep = "";
+
+ if (!xcp_cfg || !xcp_cfg->compatible_nps_modes)
+ return sysfs_emit(buf, "Not supported\n");
+
+ for_each_inst(mode, xcp_cfg->compatible_nps_modes) {
+ size += sysfs_emit_at(buf, size, "%s%s", sep, nps_desc[mode]);
+ sep = ", ";
+ }
+
+ size += sysfs_emit_at(buf, size, "\n");
+
+ return size;
+}
+
+static ssize_t xcp_config_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
+
+ return sysfs_emit(buf, "%s\n",
+ amdgpu_gfx_compute_mode_desc(xcp_cfg->mode));
+}
+
+static ssize_t xcp_config_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
+ int mode, r;
+
+ if (!strncasecmp("SPX", buf, strlen("SPX")))
+ mode = AMDGPU_SPX_PARTITION_MODE;
+ else if (!strncasecmp("DPX", buf, strlen("DPX")))
+ mode = AMDGPU_DPX_PARTITION_MODE;
+ else if (!strncasecmp("TPX", buf, strlen("TPX")))
+ mode = AMDGPU_TPX_PARTITION_MODE;
+ else if (!strncasecmp("QPX", buf, strlen("QPX")))
+ mode = AMDGPU_QPX_PARTITION_MODE;
+ else if (!strncasecmp("CPX", buf, strlen("CPX")))
+ mode = AMDGPU_CPX_PARTITION_MODE;
+ else
+ return -EINVAL;
+
+ r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
+
+ if (r)
+ return r;
+
+ xcp_cfg->mode = mode;
+ return size;
+}
+
+static struct kobj_attribute xcp_cfg_sysfs_mode =
+ __ATTR_RW_MODE(xcp_config, 0644);
+
+static void xcp_cfg_sysfs_release(struct kobject *kobj)
+{
+ struct amdgpu_xcp_cfg *xcp_cfg = to_xcp_cfg(kobj);
+
+ kfree(xcp_cfg);
+}
+
+static const struct kobj_type xcp_cfg_sysfs_ktype = {
+ .release = xcp_cfg_sysfs_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static struct kobj_attribute supp_part_sysfs_mode =
+ __ATTR_RO(supported_xcp_configs);
+
+static struct kobj_attribute supp_nps_sysfs_mode =
+ __ATTR_RO(supported_nps_configs);
+
+static const struct attribute *xcp_attrs[] = {
+ &supp_part_sysfs_mode.attr,
+ &xcp_cfg_sysfs_mode.attr,
+ NULL,
+};
+
+static void amdgpu_xcp_cfg_sysfs_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_xcp_res_details *xcp_res;
+ struct amdgpu_xcp_cfg *xcp_cfg;
+ int i, r, j, rid, mode;
+
+ if (!adev->xcp_mgr)
+ return;
+
+ xcp_cfg = kzalloc(sizeof(*xcp_cfg), GFP_KERNEL);
+ if (!xcp_cfg)
+ return;
+ xcp_cfg->xcp_mgr = adev->xcp_mgr;
+
+ r = kobject_init_and_add(&xcp_cfg->kobj, &xcp_cfg_sysfs_ktype,
+ &adev->dev->kobj, "compute_partition_config");
+ if (r)
+ goto err1;
+
+ r = sysfs_create_files(&xcp_cfg->kobj, xcp_attrs);
+ if (r)
+ goto err1;
+
+ if (adev->gmc.supported_nps_modes != 0) {
+ r = sysfs_create_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
+ if (r) {
+ sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
+ goto err1;
+ }
+ }
+
+ mode = (xcp_cfg->xcp_mgr->mode ==
+ AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) ?
+ AMDGPU_SPX_PARTITION_MODE :
+ xcp_cfg->xcp_mgr->mode;
+ r = amdgpu_xcp_get_res_info(xcp_cfg->xcp_mgr, mode, xcp_cfg);
+ if (r) {
+ sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
+ sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
+ goto err1;
+ }
+
+ xcp_cfg->mode = mode;
+ for (i = 0; i < xcp_cfg->num_res; i++) {
+ xcp_res = &xcp_cfg->xcp_res[i];
+ rid = xcp_res->id;
+ r = kobject_init_and_add(&xcp_res->kobj,
+ &xcp_cfg_res_sysfs_ktype,
+ &xcp_cfg->kobj, "%s",
+ xcp_res_names[rid]);
+ if (r)
+ goto err;
+ }
+
+ adev->xcp_mgr->xcp_cfg = xcp_cfg;
+ return;
+err:
+ for (j = 0; j < i; j++) {
+ xcp_res = &xcp_cfg->xcp_res[j];
+ kobject_put(&xcp_res->kobj);
+ }
+
+ sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
+ sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
+err1:
+ kobject_put(&xcp_cfg->kobj);
+}
+
+static void amdgpu_xcp_cfg_sysfs_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_xcp_res_details *xcp_res;
+ struct amdgpu_xcp_cfg *xcp_cfg;
+ int i;
+
+ if (!adev->xcp_mgr || !adev->xcp_mgr->xcp_cfg)
+ return;
+
+ xcp_cfg = adev->xcp_mgr->xcp_cfg;
+ for (i = 0; i < xcp_cfg->num_res; i++) {
+ xcp_res = &xcp_cfg->xcp_res[i];
+ kobject_put(&xcp_res->kobj);
+ }
+
+ sysfs_remove_file(&xcp_cfg->kobj, &supp_nps_sysfs_mode.attr);
+ sysfs_remove_files(&xcp_cfg->kobj, xcp_attrs);
+ kobject_put(&xcp_cfg->kobj);
+}
+
+/*====================== xcp sysfs - data entries ======================*/
+
+#define to_xcp(x) container_of(x, struct amdgpu_xcp, kobj)
+
+static ssize_t xcp_metrics_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct amdgpu_xcp *xcp = to_xcp(kobj);
+ struct amdgpu_xcp_mgr *xcp_mgr;
+ ssize_t size;
+
+ xcp_mgr = xcp->xcp_mgr;
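+ /*
+ * Query the metrics blob size first (NULL buffer), then emit the data
+ * only if it fits within a single sysfs page.
+ */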
+ size = amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, NULL);
+ if (size <= 0)
+ return size;
+
+ if (size > PAGE_SIZE)
+ return -ENOSPC;
+
+ return amdgpu_dpm_get_xcp_metrics(xcp_mgr->adev, xcp->id, buf);
+}
+
+static umode_t amdgpu_xcp_attrs_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct amdgpu_xcp *xcp = to_xcp(kobj);
+
+ if (!xcp || !xcp->valid)
+ return 0;
+
+ return attr->mode;
+}
+
+static struct kobj_attribute xcp_sysfs_metrics = __ATTR_RO(xcp_metrics);
+
+static struct attribute *amdgpu_xcp_attrs[] = {
+ &xcp_sysfs_metrics.attr,
+ NULL,
+};
+
+static const struct attribute_group amdgpu_xcp_attrs_group = {
+ .attrs = amdgpu_xcp_attrs,
+ .is_visible = amdgpu_xcp_attrs_is_visible
+};
+
+static const struct kobj_type xcp_sysfs_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+};
+
+static void amdgpu_xcp_sysfs_entries_fini(struct amdgpu_xcp_mgr *xcp_mgr, int n)
+{
+ struct amdgpu_xcp *xcp;
+
+ for (n--; n >= 0; n--) {
+ xcp = &xcp_mgr->xcp[n];
+ if (!xcp->ddev || !xcp->valid)
+ continue;
+ sysfs_remove_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
+ kobject_put(&xcp->kobj);
+ }
+}
+
+static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_xcp *xcp;
+ int i, r;
+
+ for (i = 0; i < MAX_XCP; i++) {
+ /* Redirect all IOCTLs to the primary device */
+ xcp = &xcp_mgr->xcp[i];
+ if (!xcp->ddev)
+ break;
+ r = kobject_init_and_add(&xcp->kobj, &xcp_sysfs_ktype,
+ &xcp->ddev->dev->kobj, "xcp");
+ if (r)
+ goto out;
+
+ r = sysfs_create_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
+ if (r)
+ goto out;
+ }
+
+ return;
+out:
+ kobject_put(&xcp->kobj);
+}
+
+static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_xcp *xcp;
+ int i;
+
+ for (i = 0; i < MAX_XCP; i++) {
+ /* Redirect all IOCTLs to the primary device */
+ xcp = &xcp_mgr->xcp[i];
+ if (!xcp->ddev)
+ continue;
+ sysfs_update_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
+ }
+
+ return;
+}
+
+void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev)
+{
+ if (!adev->xcp_mgr)
+ return;
+
+ amdgpu_xcp_cfg_sysfs_init(adev);
+
+ return;
+}
+
+void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev)
+{
+ if (!adev->xcp_mgr)
+ return;
+ amdgpu_xcp_sysfs_entries_fini(adev->xcp_mgr, MAX_XCP);
+ amdgpu_xcp_cfg_sysfs_fini(adev);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
new file mode 100644
index 000000000000..1928d9e224fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGPU_XCP_H
+#define AMDGPU_XCP_H
+
+#include <linux/pci.h>
+#include <linux/xarray.h>
+
+#include "amdgpu_ctx.h"
+
+#define MAX_XCP 8
+
+#define AMDGPU_XCP_MODE_NONE -1
+#define AMDGPU_XCP_MODE_TRANS -2
+
+#define AMDGPU_XCP_FL_NONE 0
+#define AMDGPU_XCP_FL_LOCKED (1 << 0)
+
+#define AMDGPU_XCP_NO_PARTITION (~0)
+
+#define AMDGPU_XCP_OPS_KFD (1 << 0)
+
+struct amdgpu_fpriv;
+
+enum AMDGPU_XCP_IP_BLOCK {
+ AMDGPU_XCP_GFXHUB,
+ AMDGPU_XCP_GFX,
+ AMDGPU_XCP_SDMA,
+ AMDGPU_XCP_VCN,
+ AMDGPU_XCP_MAX_BLOCKS
+};
+
+enum AMDGPU_XCP_STATE {
+ AMDGPU_XCP_PREPARE_SUSPEND,
+ AMDGPU_XCP_SUSPEND,
+ AMDGPU_XCP_PREPARE_RESUME,
+ AMDGPU_XCP_RESUME,
+};
+
+enum amdgpu_xcp_res_id {
+ AMDGPU_XCP_RES_XCC,
+ AMDGPU_XCP_RES_DMA,
+ AMDGPU_XCP_RES_DEC,
+ AMDGPU_XCP_RES_JPEG,
+ AMDGPU_XCP_RES_MAX,
+};
+
+struct amdgpu_xcp_res_details {
+ enum amdgpu_xcp_res_id id;
+ u8 num_inst;
+ u8 num_shared;
+ struct kobject kobj;
+};
+
+struct amdgpu_xcp_cfg {
+ u8 mode;
+ struct amdgpu_xcp_res_details xcp_res[AMDGPU_XCP_RES_MAX];
+ u8 num_res;
+ struct amdgpu_xcp_mgr *xcp_mgr;
+ struct kobject kobj;
+ u16 compatible_nps_modes;
+};
+
+struct amdgpu_xcp_ip_funcs {
+ int (*prepare_suspend)(void *handle, uint32_t inst_mask);
+ int (*suspend)(void *handle, uint32_t inst_mask);
+ int (*prepare_resume)(void *handle, uint32_t inst_mask);
+ int (*resume)(void *handle, uint32_t inst_mask);
+};
+
+struct amdgpu_xcp_ip {
+ struct amdgpu_xcp_ip_funcs *ip_funcs;
+ uint32_t inst_mask;
+
+ enum AMDGPU_XCP_IP_BLOCK ip_id;
+ bool valid;
+};
+
+struct amdgpu_xcp {
+ struct amdgpu_xcp_ip ip[AMDGPU_XCP_MAX_BLOCKS];
+
+ uint8_t id;
+ uint8_t mem_id;
+ bool valid;
+ atomic_t ref_cnt;
+ struct drm_device *ddev;
+ struct drm_device *rdev;
+ struct drm_device *pdev;
+ struct drm_driver *driver;
+ struct drm_vma_offset_manager *vma_offset_manager;
+ struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
+ struct amdgpu_xcp_mgr *xcp_mgr;
+ struct kobject kobj;
+ uint64_t unique_id;
+};
+
+struct amdgpu_xcp_mgr {
+ struct amdgpu_device *adev;
+ struct mutex xcp_lock;
+ struct amdgpu_xcp_mgr_funcs *funcs;
+
+ struct amdgpu_xcp xcp[MAX_XCP];
+ uint8_t num_xcps;
+ int8_t mode;
+
+ /* Used to determine KFD memory size limits per XCP */
+ unsigned int num_xcp_per_mem_partition;
+ struct amdgpu_xcp_cfg *xcp_cfg;
+ uint32_t supp_xcp_modes;
+ uint32_t avail_xcp_modes;
+};
+
+struct amdgpu_xcp_mgr_funcs {
+ int (*switch_partition_mode)(struct amdgpu_xcp_mgr *xcp_mgr, int mode,
+ int *num_xcps);
+ int (*query_partition_mode)(struct amdgpu_xcp_mgr *xcp_mgr);
+ int (*get_ip_details)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
+ enum AMDGPU_XCP_IP_BLOCK ip_id,
+ struct amdgpu_xcp_ip *ip);
+ int (*get_xcp_mem_id)(struct amdgpu_xcp_mgr *xcp_mgr,
+ struct amdgpu_xcp *xcp, uint8_t *mem_id);
+ int (*get_xcp_res_info)(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode,
+ struct amdgpu_xcp_cfg *xcp_cfg);
+ int (*prepare_suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+ int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+ int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+ int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+};
+
+int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id);
+
+int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
+ int init_xcps, struct amdgpu_xcp_mgr_funcs *xcp_funcs);
+int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode);
+int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
+int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode);
+int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr);
+int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
+ enum AMDGPU_XCP_IP_BLOCK ip, int instance);
+
+int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
+ enum AMDGPU_XCP_IP_BLOCK ip,
+ uint32_t *inst_mask);
+
+int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
+ const struct pci_device_id *ent);
+void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev);
+int amdgpu_xcp_open_device(struct amdgpu_device *adev,
+ struct amdgpu_fpriv *fpriv,
+ struct drm_file *file_priv);
+void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
+ struct amdgpu_ctx_entity *entity);
+int amdgpu_xcp_select_scheds(struct amdgpu_device *adev,
+ u32 hw_ip, u32 hw_prio,
+ struct amdgpu_fpriv *fpriv,
+ unsigned int *num_scheds,
+ struct drm_gpu_scheduler ***scheds);
+void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr);
+int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev);
+int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
+int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags);
+void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev);
+void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev);
+
+static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ if (!xcp_mgr)
+ return 1;
+ else
+ return xcp_mgr->num_xcps;
+}
+
+static inline struct amdgpu_xcp *
+amdgpu_get_next_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int *from)
+{
+ if (!xcp_mgr)
+ return NULL;
+
+ while (*from < MAX_XCP) {
+ if (xcp_mgr->xcp[*from].valid)
+ return &xcp_mgr->xcp[*from];
+ ++(*from);
+ }
+
+ return NULL;
+}
+
+#define for_each_xcp(xcp_mgr, xcp, i) \
+ for (i = 0, xcp = amdgpu_get_next_xcp(xcp_mgr, &i); xcp; \
+ ++i, xcp = amdgpu_get_next_xcp(xcp_mgr, &i))
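+
+/*
+ * Example usage (illustrative, not part of this change):
+ *
+ *	struct amdgpu_xcp *xcp;
+ *	int i;
+ *
+ *	for_each_xcp(adev->xcp_mgr, xcp, i)
+ *		dev_dbg(adev->dev, "xcp %d mem_id %d\n", xcp->id, xcp->mem_id);
+ *
+ * The iterator visits only partitions marked valid and stops at MAX_XCP.
+ */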
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
new file mode 100644
index 000000000000..1ede308a7c67
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -0,0 +1,1795 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/list.h>
+#include "amdgpu.h"
+#include "amdgpu_xgmi.h"
+#include "amdgpu_ras.h"
+#include "soc15.h"
+#include "df/df_3_6_offset.h"
+#include "xgmi/xgmi_4_0_0_smn.h"
+#include "xgmi/xgmi_4_0_0_sh_mask.h"
+#include "xgmi/xgmi_6_1_0_sh_mask.h"
+#include "wafl/wafl2_4_0_0_smn.h"
+#include "wafl/wafl2_4_0_0_sh_mask.h"
+
+#include "amdgpu_reset.h"
+
+#define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
+#define smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK 0x11a00218
+#define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210
+#define smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK 0x12200218
+
+#define XGMI_STATE_DISABLE 0xD1
+#define XGMI_STATE_LS0 0x81
+#define XGMI_LINK_ACTIVE 1
+#define XGMI_LINK_INACTIVE 0
+
+static DEFINE_MUTEX(xgmi_mutex);
+
+#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE 4
+
+static LIST_HEAD(xgmi_hive_list);
+
+static const int xgmi_pcs_err_status_reg_vg20[] = {
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const int wafl_pcs_err_status_reg_vg20[] = {
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const int xgmi_pcs_err_status_reg_arct[] = {
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
+};
+
+/* same as vg20 */
+static const int wafl_pcs_err_status_reg_arct[] = {
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = {
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS,
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000,
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x200000,
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x300000,
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x400000,
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x500000,
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x600000,
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x700000
+};
+
+static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x200000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x300000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x400000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x500000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x600000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x700000
+};
+
+static const int walf_pcs_err_status_reg_aldebaran[] = {
+ smnPCS_GOPX1_PCS_ERROR_STATUS,
+ smnPCS_GOPX1_PCS_ERROR_STATUS + 0x100000
+};
+
+static const int walf_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
+ smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK,
+ smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000
+};
+
+static const int xgmi3x16_pcs_err_status_reg_v6_4[] = {
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS,
+ smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000
+};
+
+static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_v6_4[] = {
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000
+};
+
+static const u64 xgmi_v6_4_0_mca_base_array[] = {
+ 0x11a09200,
+ 0x11b09200,
+};
+
+static const char *xgmi_v6_4_0_ras_error_code_ext[32] = {
+ [0x00] = "XGMI PCS DataLossErr",
+ [0x01] = "XGMI PCS TrainingErr",
+ [0x02] = "XGMI PCS FlowCtrlAckErr",
+ [0x03] = "XGMI PCS RxFifoUnderflowErr",
+ [0x04] = "XGMI PCS RxFifoOverflowErr",
+ [0x05] = "XGMI PCS CRCErr",
+ [0x06] = "XGMI PCS BERExceededErr",
+ [0x07] = "XGMI PCS TxMetaDataErr",
+ [0x08] = "XGMI PCS ReplayBufParityErr",
+ [0x09] = "XGMI PCS DataParityErr",
+ [0x0a] = "XGMI PCS ReplayFifoOverflowErr",
+ [0x0b] = "XGMI PCS ReplayFifoUnderflowErr",
+ [0x0c] = "XGMI PCS ElasticFifoOverflowErr",
+ [0x0d] = "XGMI PCS DeskewErr",
+ [0x0e] = "XGMI PCS FlowCtrlCRCErr",
+ [0x0f] = "XGMI PCS DataStartupLimitErr",
+ [0x10] = "XGMI PCS FCInitTimeoutErr",
+ [0x11] = "XGMI PCS RecoveryTimeoutErr",
+ [0x12] = "XGMI PCS ReadySerialTimeoutErr",
+ [0x13] = "XGMI PCS ReadySerialAttemptErr",
+ [0x14] = "XGMI PCS RecoveryAttemptErr",
+ [0x15] = "XGMI PCS RecoveryRelockAttemptErr",
+ [0x16] = "XGMI PCS ReplayAttemptErr",
+ [0x17] = "XGMI PCS SyncHdrErr",
+ [0x18] = "XGMI PCS TxReplayTimeoutErr",
+ [0x19] = "XGMI PCS RxReplayTimeoutErr",
+ [0x1a] = "XGMI PCS LinkSubTxTimeoutErr",
+ [0x1b] = "XGMI PCS LinkSubRxTimeoutErr",
+ [0x1c] = "XGMI PCS RxCMDPktErr",
+};
+
+static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
+ {"XGMI PCS DataLossErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
+ {"XGMI PCS TrainingErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
+ {"XGMI PCS CRCErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
+ {"XGMI PCS BERExceededErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
+ {"XGMI PCS TxMetaDataErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
+ {"XGMI PCS ReplayBufParityErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
+ {"XGMI PCS DataParityErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
+ {"XGMI PCS ReplayFifoOverflowErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
+ {"XGMI PCS ReplayFifoUnderflowErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
+ {"XGMI PCS ElasticFifoOverflowErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
+ {"XGMI PCS DeskewErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
+ {"XGMI PCS DataStartupLimitErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
+ {"XGMI PCS FCInitTimeoutErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
+ {"XGMI PCS RecoveryTimeoutErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
+ {"XGMI PCS ReadySerialTimeoutErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
+ {"XGMI PCS ReadySerialAttemptErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
+ {"XGMI PCS RecoveryAttemptErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
+ {"XGMI PCS RecoveryRelockAttemptErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
+};
+
+static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
+ {"WAFL PCS DataLossErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
+ {"WAFL PCS TrainingErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
+ {"WAFL PCS CRCErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
+ {"WAFL PCS BERExceededErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
+ {"WAFL PCS TxMetaDataErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
+ {"WAFL PCS ReplayBufParityErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
+ {"WAFL PCS DataParityErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
+ {"WAFL PCS ReplayFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
+ {"WAFL PCS ReplayFifoUnderflowErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
+ {"WAFL PCS ElasticFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
+ {"WAFL PCS DeskewErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
+ {"WAFL PCS DataStartupLimitErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
+ {"WAFL PCS FCInitTimeoutErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
+ {"WAFL PCS RecoveryTimeoutErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
+ {"WAFL PCS ReadySerialTimeoutErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
+ {"WAFL PCS ReadySerialAttemptErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
+ {"WAFL PCS RecoveryAttemptErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
+ {"WAFL PCS RecoveryRelockAttemptErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
+};
+
+static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = {
+ {"XGMI3X16 PCS DataLossErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataLossErr)},
+ {"XGMI3X16 PCS TrainingErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TrainingErr)},
+ {"XGMI3X16 PCS FlowCtrlAckErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlAckErr)},
+ {"XGMI3X16 PCS RxFifoUnderflowErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoUnderflowErr)},
+ {"XGMI3X16 PCS RxFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoOverflowErr)},
+ {"XGMI3X16 PCS CRCErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, CRCErr)},
+ {"XGMI3X16 PCS BERExceededErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, BERExceededErr)},
+ {"XGMI3X16 PCS TxVcidDataErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxVcidDataErr)},
+ {"XGMI3X16 PCS ReplayBufParityErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayBufParityErr)},
+ {"XGMI3X16 PCS DataParityErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataParityErr)},
+ {"XGMI3X16 PCS ReplayFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
+ {"XGMI3X16 PCS ReplayFifoUnderflowErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
+ {"XGMI3X16 PCS ElasticFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
+ {"XGMI3X16 PCS DeskewErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DeskewErr)},
+ {"XGMI3X16 PCS FlowCtrlCRCErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlCRCErr)},
+ {"XGMI3X16 PCS DataStartupLimitErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataStartupLimitErr)},
+ {"XGMI3X16 PCS FCInitTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
+ {"XGMI3X16 PCS RecoveryTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
+ {"XGMI3X16 PCS ReadySerialTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
+ {"XGMI3X16 PCS ReadySerialAttemptErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
+ {"XGMI3X16 PCS RecoveryAttemptErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
+ {"XGMI3X16 PCS RecoveryRelockAttemptErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
+ {"XGMI3X16 PCS ReplayAttemptErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayAttemptErr)},
+ {"XGMI3X16 PCS SyncHdrErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, SyncHdrErr)},
+ {"XGMI3X16 PCS TxReplayTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxReplayTimeoutErr)},
+ {"XGMI3X16 PCS RxReplayTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxReplayTimeoutErr)},
+ {"XGMI3X16 PCS LinkSubTxTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubTxTimeoutErr)},
+ {"XGMI3X16 PCS LinkSubRxTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubRxTimeoutErr)},
+ {"XGMI3X16 PCS RxCMDPktErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)},
+};
+
+int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num)
+{
+ int link_map_6_4_x[8] = { 0, 3, 1, 2, 7, 6, 4, 5 };
+
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
+ if (link_num < ARRAY_SIZE(link_map_6_4_x))
+ return link_map_6_4_x[link_num];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static u32 xgmi_v6_4_get_link_status(struct amdgpu_device *adev, int global_link_num)
+{
+ const u32 smn_xgmi_6_4_pcs_state_hist1[2] = { 0x11a00070, 0x11b00070 };
+ const u32 smn_xgmi_6_4_1_pcs_state_hist1[2] = { 0x12100070,
+ 0x11b00070 };
+ u32 i, n;
+ u64 addr;
+
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ n = ARRAY_SIZE(smn_xgmi_6_4_pcs_state_hist1);
+ addr = smn_xgmi_6_4_pcs_state_hist1[global_link_num % n];
+ break;
+ case IP_VERSION(6, 4, 1):
+ n = ARRAY_SIZE(smn_xgmi_6_4_1_pcs_state_hist1);
+ addr = smn_xgmi_6_4_1_pcs_state_hist1[global_link_num % n];
+ break;
+ default:
+ return U32_MAX;
+ }
+
+ i = global_link_num / n;
+ addr += adev->asic_funcs->encode_ext_smn_addressing(i);
+
+ return RREG32_PCIE_EXT(addr);
+}
+
+int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev, int global_link_num)
+{
+ u32 xgmi_state_reg_val;
+
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
+ xgmi_state_reg_val = xgmi_v6_4_get_link_status(adev, global_link_num);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if ((xgmi_state_reg_val & 0xFF) == XGMI_STATE_DISABLE)
+ return -ENOLINK;
+
+ if ((xgmi_state_reg_val & 0xFF) == XGMI_STATE_LS0)
+ return XGMI_LINK_ACTIVE;
+
+ return XGMI_LINK_INACTIVE;
+}
+
+/**
+ * DOC: AMDGPU XGMI Support
+ *
+ * XGMI is a high speed interconnect that joins multiple GPU cards
+ * into a homogeneous memory space that is organized by a collective
+ * hive ID and individual node IDs, both of which are 64-bit numbers.
+ *
+ * The file xgmi_device_id contains the unique per GPU device ID and
+ * is stored in the /sys/class/drm/card${cardno}/device/ directory.
+ *
+ * Inside the device directory a sub-directory 'xgmi_hive_info' is
+ * created which contains the hive ID and the list of nodes.
+ *
+ * The hive ID is stored in:
+ * /sys/class/drm/card${cardno}/device/xgmi_hive_info/xgmi_hive_id
+ *
+ * The node information is stored in numbered directories:
+ * /sys/class/drm/card${cardno}/device/xgmi_hive_info/node${nodeno}/xgmi_device_id
+ *
+ * Each device has its own xgmi_hive_info directory with a mirror
+ * set of node sub-directories.
+ *
+ * The XGMI memory space is built by contiguously adding the power of
+ * two padded VRAM space from each node to each other.
+ *
+ */
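+
+/*
+ * For illustration (card and node numbers are system-specific), the hive
+ * topology can be read from user space with e.g.:
+ *
+ *	cat /sys/class/drm/card0/device/xgmi_hive_info/xgmi_hive_id
+ *	cat /sys/class/drm/card0/device/xgmi_hive_info/node1/xgmi_device_id
+ *
+ * The per-device files created below (xgmi_device_id, xgmi_physical_id,
+ * xgmi_error, xgmi_num_hops, xgmi_num_links and, when supported,
+ * xgmi_port_num) live directly under /sys/class/drm/card${cardno}/device/.
+ */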
+
+static struct attribute amdgpu_xgmi_hive_id = {
+ .name = "xgmi_hive_id",
+ .mode = S_IRUGO
+};
+
+static struct attribute *amdgpu_xgmi_hive_attrs[] = {
+ &amdgpu_xgmi_hive_id,
+ NULL
+};
+ATTRIBUTE_GROUPS(amdgpu_xgmi_hive);
+
+static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct amdgpu_hive_info *hive = container_of(
+ kobj, struct amdgpu_hive_info, kobj);
+
+ if (attr == &amdgpu_xgmi_hive_id)
+ return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
+
+ return 0;
+}
+
+static void amdgpu_xgmi_hive_release(struct kobject *kobj)
+{
+ struct amdgpu_hive_info *hive = container_of(
+ kobj, struct amdgpu_hive_info, kobj);
+
+ amdgpu_reset_put_reset_domain(hive->reset_domain);
+ hive->reset_domain = NULL;
+
+ mutex_destroy(&hive->hive_lock);
+ kfree(hive);
+}
+
+static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
+ .show = amdgpu_xgmi_show_attrs,
+};
+
+static const struct kobj_type amdgpu_xgmi_hive_type = {
+ .release = amdgpu_xgmi_hive_release,
+ .sysfs_ops = &amdgpu_xgmi_hive_ops,
+ .default_groups = amdgpu_xgmi_hive_groups,
+};
+
+static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id);
+
+}
+
+static ssize_t amdgpu_xgmi_show_physical_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+
+ return sysfs_emit(buf, "%u\n", adev->gmc.xgmi.physical_node_id);
+
+}
+
+static ssize_t amdgpu_xgmi_show_num_hops(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i, size = 0;
+
+ for (i = 0; i < top->num_nodes; i++)
+ size += sysfs_emit_at(buf, size, "%02x ", top->nodes[i].num_hops);
+
+ size += sysfs_emit_at(buf, size, "\n");
+
+ return size;
+}
+
+static ssize_t amdgpu_xgmi_show_num_links(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i, size = 0;
+
+ for (i = 0; i < top->num_nodes; i++)
+ size += sysfs_emit_at(buf, size, "%02x ", top->nodes[i].num_links);
+
+ size += sysfs_emit_at(buf, size, "\n");
+
+ return size;
+}
+
+static ssize_t amdgpu_xgmi_show_connected_port_num(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i, j, size = 0;
+ int current_node;
+ /*
+ * Get the node id shown in sysfs for the current socket and use it in
+ * the port num info output for easy reading. It is NOT the node id
+ * retrieved from the XGMI TA.
+ */
+ for (i = 0; i < top->num_nodes; i++) {
+ if (top->nodes[i].node_id == adev->gmc.xgmi.node_id) {
+ current_node = i;
+ break;
+ }
+ }
+
+ if (i == top->num_nodes)
+ return -EINVAL;
+
+ for (i = 0; i < top->num_nodes; i++) {
+ for (j = 0; j < top->nodes[i].num_links; j++)
+ /* node id in sysfs starts from 1 rather than 0 so +1 here */
+ size += sysfs_emit_at(buf, size, "%02x:%02x -> %02x:%02x\n", current_node + 1,
+ top->nodes[i].port_num[j].src_xgmi_port_num, i + 1,
+ top->nodes[i].port_num[j].dst_xgmi_port_num);
+ }
+
+ return size;
+}
+
+#define AMDGPU_XGMI_SET_FICAA(o) ((o) | 0x456801)
+static ssize_t amdgpu_xgmi_show_error(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(ddev);
+ uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
+ uint64_t fica_out;
+ unsigned int error_count = 0;
+
+ ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
+ ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);
+
+ if ((!adev->df.funcs) ||
+ (!adev->df.funcs->get_fica) ||
+ (!adev->df.funcs->set_fica))
+ return -EINVAL;
+
+ fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
+ if (fica_out != 0x1f)
+ pr_err("xGMI error counters not enabled!\n");
+
+ fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);
+
+ if ((fica_out & 0xffff) == 2)
+ error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);
+
+ adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);
+
+ return sysfs_emit(buf, "%u\n", error_count);
+}
+
+
+static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
+static DEVICE_ATTR(xgmi_physical_id, 0444, amdgpu_xgmi_show_physical_id, NULL);
+static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);
+static DEVICE_ATTR(xgmi_num_hops, S_IRUGO, amdgpu_xgmi_show_num_hops, NULL);
+static DEVICE_ATTR(xgmi_num_links, S_IRUGO, amdgpu_xgmi_show_num_links, NULL);
+static DEVICE_ATTR(xgmi_port_num, S_IRUGO, amdgpu_xgmi_show_connected_port_num, NULL);
+
+static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
+{
+ int ret = 0;
+ char node[10] = { 0 };
+
+ /* Create xgmi device id file */
+ ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
+ return ret;
+ }
+
+ ret = device_create_file(adev->dev, &dev_attr_xgmi_physical_id);
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create device file xgmi_physical_id\n");
+ return ret;
+ }
+
+ /* Create xgmi error file */
+ ret = device_create_file(adev->dev, &dev_attr_xgmi_error);
+ if (ret)
+ pr_err("failed to create xgmi_error\n");
+
+ /* Create xgmi num hops file */
+ ret = device_create_file(adev->dev, &dev_attr_xgmi_num_hops);
+ if (ret)
+ pr_err("failed to create xgmi_num_hops\n");
+
+ /* Create xgmi num links file */
+ ret = device_create_file(adev->dev, &dev_attr_xgmi_num_links);
+ if (ret)
+ pr_err("failed to create xgmi_num_links\n");
+
+ /* Create xgmi port num file if supported */
+ if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) {
+ ret = device_create_file(adev->dev, &dev_attr_xgmi_port_num);
+ if (ret)
+ dev_err(adev->dev, "failed to create xgmi_port_num\n");
+ }
+
+ /* Create sysfs link to hive info folder on the first device */
+ if (hive->kobj.parent != (&adev->dev->kobj)) {
+ ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
+ "xgmi_hive_info");
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create link to hive info");
+ goto remove_file;
+ }
+ }
+
+ sprintf(node, "node%d", atomic_read(&hive->number_devices));
+ /* Create sysfs link from the hive folder to this device */
+ ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node);
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create link from hive info");
+ goto remove_link;
+ }
+
+ goto success;
+
+
+remove_link:
+ sysfs_remove_link(&adev->dev->kobj, adev_to_drm(adev)->unique);
+
+remove_file:
+ device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
+ device_remove_file(adev->dev, &dev_attr_xgmi_physical_id);
+ device_remove_file(adev->dev, &dev_attr_xgmi_error);
+ device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
+ device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
+ if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG)
+ device_remove_file(adev->dev, &dev_attr_xgmi_port_num);
+
+success:
+ return ret;
+}
+
+static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
+{
+ char node[10];
+ memset(node, 0, sizeof(node));
+
+ device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
+ device_remove_file(adev->dev, &dev_attr_xgmi_physical_id);
+ device_remove_file(adev->dev, &dev_attr_xgmi_error);
+ device_remove_file(adev->dev, &dev_attr_xgmi_num_hops);
+ device_remove_file(adev->dev, &dev_attr_xgmi_num_links);
+ if (adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG)
+ device_remove_file(adev->dev, &dev_attr_xgmi_port_num);
+
+ if (hive->kobj.parent != (&adev->dev->kobj))
+ sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");
+
+ sprintf(node, "node%d", atomic_read(&hive->number_devices));
+ sysfs_remove_link(&hive->kobj, node);
+
+}
+
+
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
+{
+ struct amdgpu_hive_info *hive = NULL;
+ int ret;
+
+ if (!adev->gmc.xgmi.hive_id)
+ return NULL;
+
+ if (adev->hive) {
+ kobject_get(&adev->hive->kobj);
+ return adev->hive;
+ }
+
+ mutex_lock(&xgmi_mutex);
+
+ list_for_each_entry(hive, &xgmi_hive_list, node) {
+ if (hive->hive_id == adev->gmc.xgmi.hive_id)
+ goto pro_end;
+ }
+
+ hive = kzalloc(sizeof(*hive), GFP_KERNEL);
+ if (!hive) {
+ dev_err(adev->dev, "XGMI: allocation failed\n");
+ ret = -ENOMEM;
+ hive = NULL;
+ goto pro_end;
+ }
+
+ /* initialize a new hive if it does not exist */
+ ret = kobject_init_and_add(&hive->kobj,
+ &amdgpu_xgmi_hive_type,
+ &adev->dev->kobj,
+ "%s", "xgmi_hive_info");
+ if (ret) {
+ dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
+ kobject_put(&hive->kobj);
+ hive = NULL;
+ goto pro_end;
+ }
+
+ /**
+ * Only init hive->reset_domain for non-SRIOV configurations. For SRIOV,
+ * the host driver decides how to reset the GPU, either through FLR or
+ * chain reset. The guest side will get individual notifications from
+ * the host for the FLR if necessary.
+ */
+ if (!amdgpu_sriov_vf(adev)) {
+ /**
+ * Avoid recreating the reset domain when the hive is reconstructed for
+ * the case of resetting the devices in the XGMI hive during probe for a
+ * passthrough GPU.
+ * See https://www.spinics.net/lists/amd-gfx/msg58836.html
+ */
+ if (adev->reset_domain->type != XGMI_HIVE) {
+ hive->reset_domain =
+ amdgpu_reset_create_reset_domain(XGMI_HIVE, "amdgpu-reset-hive");
+ if (!hive->reset_domain) {
+ dev_err(adev->dev, "XGMI: failed initializing reset domain for xgmi hive\n");
+ ret = -ENOMEM;
+ kobject_put(&hive->kobj);
+ hive = NULL;
+ goto pro_end;
+ }
+ } else {
+ amdgpu_reset_get_reset_domain(adev->reset_domain);
+ hive->reset_domain = adev->reset_domain;
+ }
+ }
+
+ hive->hive_id = adev->gmc.xgmi.hive_id;
+ INIT_LIST_HEAD(&hive->device_list);
+ INIT_LIST_HEAD(&hive->node);
+ mutex_init(&hive->hive_lock);
+ atomic_set(&hive->number_devices, 0);
+ task_barrier_init(&hive->tb);
+ hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
+ hive->hi_req_gpu = NULL;
+ atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE);
+
+ /*
+ * The hive pstate on boot is high in vega20, so we have to go to the
+ * low pstate after boot.
+ */
+ hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
+ list_add_tail(&hive->node, &xgmi_hive_list);
+
+pro_end:
+ if (hive)
+ kobject_get(&hive->kobj);
+ mutex_unlock(&xgmi_mutex);
+ return hive;
+}
+
+void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive)
+{
+ if (hive)
+ kobject_put(&hive->kobj);
+}
+
+int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
+{
+ int ret = 0;
+ struct amdgpu_hive_info *hive;
+ struct amdgpu_device *request_adev;
+ bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
+ bool init_low;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (!hive)
+ return 0;
+
+ request_adev = hive->hi_req_gpu ? hive->hi_req_gpu : adev;
+ init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
+ amdgpu_put_xgmi_hive(hive);
+ /* Pstate switching is temporarily disabled due to a firmware bug */
+ return 0;
+
+ if (!hive || adev->asic_type != CHIP_VEGA20)
+ return 0;
+
+ mutex_lock(&hive->hive_lock);
+
+ if (is_hi_req)
+ hive->hi_req_count++;
+ else
+ hive->hi_req_count--;
+
+ /*
+ * Vega20 only needs a single peer to request pstate high for the hive to
+ * go high, but all peers must request pstate low for the hive to go low.
+ */
+ if (hive->pstate == pstate ||
+ (!is_hi_req && hive->hi_req_count && !init_low))
+ goto out;
+
+ dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);
+
+ ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
+ if (ret) {
+ dev_err(request_adev->dev,
+ "XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
+ request_adev->gmc.xgmi.node_id,
+ request_adev->gmc.xgmi.hive_id, ret);
+ goto out;
+ }
+
+ if (init_low)
+ hive->pstate = hive->hi_req_count ?
+ hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
+ else {
+ hive->pstate = pstate;
+ hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
+ adev : NULL;
+ }
+out:
+ mutex_unlock(&hive->hive_lock);
+ return ret;
+}
+
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
+{
+ int ret;
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ /* Each PSP needs to set the latest topology */
+ ret = psp_xgmi_set_topology_info(&adev->psp,
+ atomic_read(&hive->number_devices),
+ &adev->psp.xgmi_context.top_info);
+ if (ret)
+ dev_err(adev->dev,
+ "XGMI: Set topology failure on device %llx, hive %llx, ret %d",
+ adev->gmc.xgmi.node_id,
+ adev->gmc.xgmi.hive_id, ret);
+
+ return ret;
+}
+
+
+/*
+ * NOTE psp_xgmi_node_info.num_hops layout is as follows:
+ * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
+ * num_hops[5:3] = reserved
+ * num_hops[2:0] = number of hops
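+ * e.g. num_hops = 0x41 decodes to link type 1 (xGMI3) with 1 hop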
+ */
+int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev)
+{
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ uint8_t num_hops_mask = 0x7;
+ int i;
+
+ if (!adev->gmc.xgmi.supported)
+ return 0;
+
+ for (i = 0 ; i < top->num_nodes; ++i)
+ if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
+ return top->nodes[i].num_hops & num_hops_mask;
+
+ dev_err(adev->dev, "Failed to get xgmi hops count for peer %d.\n",
+ peer_adev->gmc.xgmi.physical_node_id);
+
+ return 0;
+}
+
+int amdgpu_xgmi_get_bandwidth(struct amdgpu_device *adev, struct amdgpu_device *peer_adev,
+ enum amdgpu_xgmi_bw_mode bw_mode, enum amdgpu_xgmi_bw_unit bw_unit,
+ uint32_t *min_bw, uint32_t *max_bw)
+{
+ bool peer_mode = bw_mode == AMDGPU_XGMI_BW_MODE_PER_PEER;
+ int unit_scale = bw_unit == AMDGPU_XGMI_BW_UNIT_MBYTES ? 1000 : 1;
+ int num_lanes = adev->gmc.xgmi.max_width;
+ int speed = adev->gmc.xgmi.max_speed;
+ int num_links = !peer_mode ? 1 : -1;
+
+ if (!(min_bw && max_bw))
+ return -EINVAL;
+
+ *min_bw = 0;
+ *max_bw = 0;
+
+ if (!adev->gmc.xgmi.supported)
+ return -ENODATA;
+
+ if (peer_mode && !peer_adev)
+ return -EINVAL;
+
+ if (peer_mode) {
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i;
+
+ for (i = 0 ; i < top->num_nodes; ++i) {
+ if (top->nodes[i].node_id != peer_adev->gmc.xgmi.node_id)
+ continue;
+
+ num_links = top->nodes[i].num_links;
+ break;
+ }
+ }
+
+ if (num_links == -1) {
+ dev_err(adev->dev, "Failed to get number of xgmi links for peer %d.\n",
+ peer_adev->gmc.xgmi.physical_node_id);
+ } else if (num_links) {
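+ /* speed (GT/s) * lanes / BITS_PER_BYTE gives GB/s per link;
+ * unit_scale of 1000 converts that to MB/s (decimal units).
+ */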
+ int per_link_bw = (speed * num_lanes * unit_scale)/BITS_PER_BYTE;
+
+ *min_bw = per_link_bw;
+ *max_bw = num_links * per_link_bw;
+ }
+
+ return 0;
+}
+
+bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev)
+{
+ struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ int i;
+
+ /* Sharing should always be enabled for non-SRIOV. */
+ if (!amdgpu_sriov_vf(adev))
+ return true;
+
+ for (i = 0 ; i < top->num_nodes; ++i)
+ if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
+ return !!top->nodes[i].is_sharing_enabled;
+
+ return false;
+}
+
+/*
+ * Devices that support extended data require the entire hive to initialize with
+ * the shared memory buffer flag set.
+ *
+ * Hive locks and conditions apply - see amdgpu_xgmi_add_device
+ */
+static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_info *hive,
+ bool set_extended_data)
+{
+ struct amdgpu_device *tmp_adev;
+ int ret;
+
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ ret = psp_xgmi_initialize(&tmp_adev->psp, set_extended_data, false);
+ if (ret) {
+ dev_err(tmp_adev->dev,
+ "XGMI: Failed to initialize xgmi session for data partition %i\n",
+ set_extended_data);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void amdgpu_xgmi_fill_topology_info(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev)
+{
+ struct psp_xgmi_topology_info *top_info = &adev->psp.xgmi_context.top_info;
+ struct psp_xgmi_topology_info *peer_info = &peer_adev->psp.xgmi_context.top_info;
+
+ for (int i = 0; i < peer_info->num_nodes; i++) {
+ if (peer_info->nodes[i].node_id == adev->gmc.xgmi.node_id) {
+ for (int j = 0; j < top_info->num_nodes; j++) {
+ if (top_info->nodes[j].node_id == peer_adev->gmc.xgmi.node_id) {
+ peer_info->nodes[i].num_hops = top_info->nodes[j].num_hops;
+ peer_info->nodes[i].is_sharing_enabled =
+ top_info->nodes[j].is_sharing_enabled;
+ peer_info->nodes[i].num_links =
+ top_info->nodes[j].num_links;
+ return;
+ }
+ }
+ }
+ }
+}
+
+int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
+{
+ struct psp_xgmi_topology_info *top_info;
+ struct amdgpu_hive_info *hive;
+ struct amdgpu_xgmi *entry;
+ struct amdgpu_device *tmp_adev = NULL;
+
+ int count = 0, ret = 0;
+
+ if (!adev->gmc.xgmi.supported)
+ return 0;
+
+ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ ret = psp_xgmi_initialize(&adev->psp, false, true);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to initialize xgmi session\n");
+ return ret;
+ }
+
+ ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to get hive id\n");
+ return ret;
+ }
+
+ ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to get node id\n");
+ return ret;
+ }
+ } else {
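+ /* no PSP IP block available: fall back to fixed hive/node ids (placeholder values) */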
+ adev->gmc.xgmi.hive_id = 16;
+ adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
+ }
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (!hive) {
+ ret = -EINVAL;
+ dev_err(adev->dev,
+ "XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
+ adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
+ goto exit;
+ }
+ mutex_lock(&hive->hive_lock);
+
+ top_info = &adev->psp.xgmi_context.top_info;
+
+ list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
+ list_for_each_entry(entry, &hive->device_list, head)
+ top_info->nodes[count++].node_id = entry->node_id;
+ top_info->num_nodes = count;
+ atomic_set(&hive->number_devices, count);
+
+ task_barrier_add_task(&hive->tb);
+
+ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ /* update the node list for the other devices in the hive */
+ if (tmp_adev != adev) {
+ top_info = &tmp_adev->psp.xgmi_context.top_info;
+ top_info->nodes[count - 1].node_id =
+ adev->gmc.xgmi.node_id;
+ top_info->num_nodes = count;
+ }
+ ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
+ if (ret)
+ goto exit_unlock;
+ }
+
+ if (amdgpu_sriov_vf(adev) &&
+ adev->psp.xgmi_context.xgmi_ta_caps & EXTEND_PEER_LINK_INFO_CMD_FLAG) {
+ /* only get the topology for the VF being initialized if it supports full duplex */
+ ret = psp_xgmi_get_topology_info(&adev->psp, count,
+ &adev->psp.xgmi_context.top_info, false);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
+ adev->gmc.xgmi.node_id,
+ adev->gmc.xgmi.hive_id, ret);
+ /* TODO: either continue with some nodes failed or disable the whole hive */
+ goto exit_unlock;
+ }
+
+ /* fill the topology info for peers instead of getting from PSP */
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ amdgpu_xgmi_fill_topology_info(adev, tmp_adev);
+ }
+ } else {
+ /* get latest topology info for each device from psp */
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
+ &tmp_adev->psp.xgmi_context.top_info, false);
+ if (ret) {
+ dev_err(tmp_adev->dev,
+ "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
+ tmp_adev->gmc.xgmi.node_id,
+ tmp_adev->gmc.xgmi.hive_id, ret);
+ /* TODO: either continue with some nodes failed or disable the whole hive */
+ goto exit_unlock;
+ }
+ }
+ }
+
+ /* get topology again for hives that support extended data */
+ if (adev->psp.xgmi_context.supports_extended_data) {
+
+ /* initialize the hive to get extended data. */
+ ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, true);
+ if (ret)
+ goto exit_unlock;
+
+ /* get the extended data. */
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
+ &tmp_adev->psp.xgmi_context.top_info, true);
+ if (ret) {
+ dev_err(tmp_adev->dev,
+ "XGMI: Get topology for extended data failure on device %llx, hive %llx, ret %d",
+ tmp_adev->gmc.xgmi.node_id,
+ tmp_adev->gmc.xgmi.hive_id, ret);
+ goto exit_unlock;
+ }
+ }
+
+ /* initialize the hive to get non-extended data for the next round. */
+ ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, false);
+ if (ret)
+ goto exit_unlock;
+
+ }
+ }
+
+ if (!ret)
+ ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);
+
+exit_unlock:
+ mutex_unlock(&hive->hive_lock);
+exit:
+ if (!ret) {
+ adev->hive = hive;
+ dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
+ adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
+ } else {
+ amdgpu_put_xgmi_hive(hive);
+ dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
+ adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
+ ret);
+ }
+
+ return ret;
+}
+
+int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_hive_info *hive = adev->hive;
+
+ if (!adev->gmc.xgmi.supported)
+ return -EINVAL;
+
+ if (!hive)
+ return -EINVAL;
+
+ mutex_lock(&hive->hive_lock);
+ task_barrier_rem_task(&hive->tb);
+ amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
+ if (hive->hi_req_gpu == adev)
+ hive->hi_req_gpu = NULL;
+ list_del(&adev->gmc.xgmi.head);
+ mutex_unlock(&hive->hive_lock);
+
+ amdgpu_put_xgmi_hive(hive);
+ adev->hive = NULL;
+
+ if (atomic_dec_return(&hive->number_devices) == 0) {
+ /* Remove the hive from global hive list */
+ mutex_lock(&xgmi_mutex);
+ list_del(&hive->node);
+ mutex_unlock(&xgmi_mutex);
+
+ amdgpu_put_xgmi_hive(hive);
+ }
+
+ return 0;
+}
+
+static int xgmi_v6_4_0_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
+ enum aca_smu_type type, void *data)
+{
+ struct amdgpu_device *adev = handle->adev;
+ struct aca_bank_info info;
+ const char *error_str;
+ u64 status, count;
+ int ret, ext_error_code;
+
+ ret = aca_bank_info_decode(bank, &info);
+ if (ret)
+ return ret;
+
+ status = bank->regs[ACA_REG_IDX_STATUS];
+ ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
+
+ error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ?
+ xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL;
+ if (error_str)
+ dev_info(adev->dev, "%s detected\n", error_str);
+
+ count = ACA_REG__MISC0__ERRCNT(bank->regs[ACA_REG_IDX_MISC0]);
+
+ switch (type) {
+ case ACA_SMU_TYPE_UE:
+ if (ext_error_code != 0 && ext_error_code != 9)
+ count = 0ULL;
+
+ bank->aca_err_type = ACA_ERROR_TYPE_UE;
+ ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, count);
+ break;
+ case ACA_SMU_TYPE_CE:
+ count = ext_error_code == 6 ? count : 0ULL;
+ bank->aca_err_type = ACA_ERROR_TYPE_CE;
+ ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, count);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct aca_bank_ops xgmi_v6_4_0_aca_bank_ops = {
+ .aca_bank_parser = xgmi_v6_4_0_aca_bank_parser,
+};
+
+static const struct aca_info xgmi_v6_4_0_aca_info = {
+ .hwip = ACA_HWIP_TYPE_PCS_XGMI,
+ .mask = ACA_ERROR_UE_MASK | ACA_ERROR_CE_MASK,
+ .bank_ops = &xgmi_v6_4_0_aca_bank_ops,
+};
+
+static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+ int r;
+
+ if (!adev->gmc.xgmi.supported ||
+ adev->gmc.xgmi.num_physical_nodes == 0)
+ return 0;
+
+ amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);
+
+ r = amdgpu_ras_block_late_init(adev, ras_block);
+ if (r)
+ return r;
+
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
+ r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL,
+ &xgmi_v6_4_0_aca_info, NULL);
+ if (r)
+ goto late_fini;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+
+late_fini:
+ amdgpu_ras_block_late_fini(adev, ras_block);
+
+ return r;
+}
+
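+/* Translate a node-local physical address into the hive's aggregate address
+ * space by adding this node's segment offset (physical_node_id * node_segment_size).
+ */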
+uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
+ uint64_t addr)
+{
+ struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
+ return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
+}
+
+static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
+{
+ WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
+ WREG32_PCIE(pcs_status_reg, 0);
+}
+
+static void amdgpu_xgmi_legacy_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
+
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
+ pcs_clear_status(adev,
+ xgmi_pcs_err_status_reg_arct[i]);
+ break;
+ case CHIP_VEGA20:
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
+ pcs_clear_status(adev,
+ xgmi_pcs_err_status_reg_vg20[i]);
+ break;
+ case CHIP_ALDEBARAN:
+ for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++)
+ pcs_clear_status(adev,
+ xgmi3x16_pcs_err_status_reg_aldebaran[i]);
+ for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++)
+ pcs_clear_status(adev,
+ walf_pcs_err_status_reg_aldebaran[i]);
+ break;
+ default:
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
+ for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++)
+ pcs_clear_status(adev,
+ xgmi3x16_pcs_err_status_reg_v6_4[i]);
+ break;
+ default:
+ break;
+ }
+}
+
+static void __xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst, u64 mca_base)
+{
+ WREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS, 0ULL);
+}
+
+static void xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(xgmi_v6_4_0_mca_base_array); i++)
+ __xgmi_v6_4_0_reset_error_count(adev, xgmi_inst, xgmi_v6_4_0_mca_base_array[i]);
+}
+
+static void xgmi_v6_4_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ int i;
+
+ for_each_inst(i, adev->aid_mask)
+ xgmi_v6_4_0_reset_error_count(adev, i);
+}
+
+static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
+ xgmi_v6_4_0_reset_ras_error_count(adev);
+ break;
+ default:
+ amdgpu_xgmi_legacy_reset_ras_error_count(adev);
+ break;
+ }
+}
+
+static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
+ uint32_t value,
+ uint32_t mask_value,
+ uint32_t *ue_count,
+ uint32_t *ce_count,
+ bool is_xgmi_pcs,
+ bool check_mask)
+{
+ int i;
+ int ue_cnt = 0;
+ const struct amdgpu_pcs_ras_field *pcs_ras_fields = NULL;
+ uint32_t field_array_size = 0;
+
+ if (is_xgmi_pcs) {
+ if (amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
+ IP_VERSION(6, 1, 0) ||
+ amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
+ IP_VERSION(6, 4, 0) ||
+ amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
+ IP_VERSION(6, 4, 1)) {
+ pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0];
+ field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields);
+ } else {
+ pcs_ras_fields = &xgmi_pcs_ras_fields[0];
+ field_array_size = ARRAY_SIZE(xgmi_pcs_ras_fields);
+ }
+ } else {
+ pcs_ras_fields = &wafl_pcs_ras_fields[0];
+ field_array_size = ARRAY_SIZE(wafl_pcs_ras_fields);
+ }
+
+ if (check_mask)
+ value = value & ~mask_value;
+
+ /* query xgmi/wafl pcs error status, only ue is supported */
+ for (i = 0; value && i < field_array_size; i++) {
+ ue_cnt = (value &
+ pcs_ras_fields[i].pcs_err_mask) >>
+ pcs_ras_fields[i].pcs_err_shift;
+ if (ue_cnt) {
+ dev_info(adev->dev, "%s detected\n",
+ pcs_ras_fields[i].err_name);
+ *ue_count += ue_cnt;
+ }
+
+ /* reset bit value if the bit is checked */
+ value &= ~(pcs_ras_fields[i].pcs_err_mask);
+ }
+
+ return 0;
+}
+
+static void amdgpu_xgmi_legacy_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ int i, supported = 1;
+ uint32_t data, mask_data = 0;
+ uint32_t ue_cnt = 0, ce_cnt = 0;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
+ return;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ /* check xgmi pcs error */
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
+ data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, true, false);
+ }
+ /* check wafl pcs error */
+ for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
+ data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, false, false);
+ }
+ break;
+ case CHIP_VEGA20:
+ /* check xgmi pcs error */
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
+ data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, true, false);
+ }
+ /* check wafl pcs error */
+ for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
+ data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, false, false);
+ }
+ break;
+ case CHIP_ALDEBARAN:
+ /* check xgmi3x16 pcs error */
+ for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) {
+ data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]);
+ mask_data =
+ RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, true, true);
+ }
+ /* check wafl pcs error */
+ for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) {
+ data = RREG32_PCIE(walf_pcs_err_status_reg_aldebaran[i]);
+ mask_data =
+ RREG32_PCIE(walf_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, false, true);
+ }
+ break;
+ default:
+ supported = 0;
+ break;
+ }
+
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
+ /* check xgmi3x16 pcs error */
+ for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++) {
+ data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_v6_4[i]);
+ mask_data =
+ RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_v6_4[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, true, true);
+ }
+ break;
+ default:
+ if (!supported)
+ dev_warn(adev->dev, "XGMI RAS error query not supported");
+ break;
+ }
+
+ amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL);
+
+ err_data->ue_count += ue_cnt;
+ err_data->ce_count += ce_cnt;
+}
+
+static enum aca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdgpu_device *adev, u64 status)
+{
+ const char *error_str;
+ int ext_error_code;
+
+ ext_error_code = ACA_REG__STATUS__ERRORCODEEXT(status);
+
+ error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ?
+ xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL;
+ if (error_str)
+ dev_info(adev->dev, "%s detected\n", error_str);
+
+ switch (ext_error_code) {
+ case 0:
+ return ACA_ERROR_TYPE_UE;
+ case 6:
+ return ACA_ERROR_TYPE_CE;
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct amdgpu_smuio_mcm_config_info *mcm_info,
+ u64 mca_base, struct ras_err_data *err_data)
+{
+ int xgmi_inst = mcm_info->die_id;
+ u64 status = 0;
+
+ status = RREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS);
+ if (!ACA_REG__STATUS__VAL(status))
+ return;
+
+ switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
+ case ACA_ERROR_TYPE_UE:
+ amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL);
+ break;
+ case ACA_ERROR_TYPE_CE:
+ amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL);
+ break;
+ default:
+ break;
+ }
+
+ WREG64_MCA(xgmi_inst, mca_base, ACA_REG_IDX_STATUS, 0ULL);
+}
+
+static void xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, int xgmi_inst, struct ras_err_data *err_data)
+{
+ struct amdgpu_smuio_mcm_config_info mcm_info = {
+ .socket_id = adev->smuio.funcs->get_socket_id(adev),
+ .die_id = xgmi_inst,
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(xgmi_v6_4_0_mca_base_array); i++)
+ __xgmi_v6_4_0_query_error_count(adev, &mcm_info, xgmi_v6_4_0_mca_base_array[i], err_data);
+}
+
+static void xgmi_v6_4_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ int i;
+
+ for_each_inst(i, adev->aid_mask)
+ xgmi_v6_4_0_query_error_count(adev, i, err_data);
+}
+
+static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+ case IP_VERSION(6, 4, 0):
+ case IP_VERSION(6, 4, 1):
+ xgmi_v6_4_0_query_ras_error_count(adev, ras_error_status);
+ break;
+ default:
+ amdgpu_xgmi_legacy_query_ras_error_count(adev, ras_error_status);
+ break;
+ }
+}
+
+/* Trigger XGMI/WAFL error */
+static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
+ void *inject_if, uint32_t instance_mask)
+{
+ int ret1, ret2;
+ struct ta_ras_trigger_error_input *block_info =
+ (struct ta_ras_trigger_error_input *)inject_if;
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ dev_warn(adev->dev, "Failed to disallow df cstate");
+
+ ret1 = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DISALLOW);
+ if (ret1 && ret1 != -EOPNOTSUPP)
+ dev_warn(adev->dev, "Failed to disallow XGMI power down");
+
+ ret2 = psp_ras_trigger_error(&adev->psp, block_info, instance_mask);
+
+ if (amdgpu_ras_intr_triggered())
+ return ret2;
+
+ ret1 = amdgpu_dpm_set_pm_policy(adev, PP_PM_POLICY_XGMI_PLPD, XGMI_PLPD_DEFAULT);
+ if (ret1 && ret1 != -EOPNOTSUPP)
+ dev_warn(adev->dev, "Failed to allow XGMI power down");
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ dev_warn(adev->dev, "Failed to allow df cstate");
+
+ return ret2;
+}
+
+struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = {
+ .query_ras_error_count = amdgpu_xgmi_query_ras_error_count,
+ .reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count,
+ .ras_error_inject = amdgpu_ras_error_inject_xgmi,
+};
+
+struct amdgpu_xgmi_ras xgmi_ras = {
+ .ras_block = {
+ .hw_ops = &xgmi_ras_hw_ops,
+ .ras_late_init = amdgpu_xgmi_ras_late_init,
+ },
+};
+
+int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err;
+ struct amdgpu_xgmi_ras *ras;
+
+ if (!adev->gmc.xgmi.ras)
+ return 0;
+
+ ras = adev->gmc.xgmi.ras;
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register xgmi_wafl_pcs ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "xgmi_wafl");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gmc.xgmi.ras_if = &ras->ras_block.ras_comm;
+
+ return 0;
+}
+
+static void amdgpu_xgmi_reset_on_init_work(struct work_struct *work)
+{
+ struct amdgpu_hive_info *hive =
+ container_of(work, struct amdgpu_hive_info, reset_on_init_work);
+ struct amdgpu_reset_context reset_context;
+ struct amdgpu_device *tmp_adev;
+ struct list_head device_list;
+ int r;
+
+ mutex_lock(&hive->hive_lock);
+
+ INIT_LIST_HEAD(&device_list);
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+
+ tmp_adev = list_first_entry(&device_list, struct amdgpu_device,
+ reset_list);
+ amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
+
+ reset_context.method = AMD_RESET_METHOD_ON_INIT;
+ reset_context.reset_req_dev = tmp_adev;
+ reset_context.hive = hive;
+ reset_context.reset_device_list = &device_list;
+ set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+
+ amdgpu_reset_do_xgmi_reset_on_init(&reset_context);
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ r = amdgpu_ras_init_badpage_info(tmp_adev);
+ if (r && r != -EHWPOISON)
+ dev_err(tmp_adev->dev,
+ "error during bad page data initialization");
+ }
+}
+
+static void amdgpu_xgmi_schedule_reset_on_init(struct amdgpu_hive_info *hive)
+{
+ INIT_WORK(&hive->reset_on_init_work, amdgpu_xgmi_reset_on_init_work);
+ amdgpu_reset_domain_schedule(hive->reset_domain,
+ &hive->reset_on_init_work);
+}
+
+int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_hive_info *hive;
+ bool reset_scheduled;
+ int num_devs;
+
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (!hive)
+ return -EINVAL;
+
+ mutex_lock(&hive->hive_lock);
+ num_devs = atomic_read(&hive->number_devices);
+ reset_scheduled = false;
+ if (num_devs == adev->gmc.xgmi.num_physical_nodes) {
+ amdgpu_xgmi_schedule_reset_on_init(hive);
+ reset_scheduled = true;
+ }
+
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+
+ if (reset_scheduled)
+ flush_work(&hive->reset_on_init_work);
+
+ return 0;
+}
+
+int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive,
+ int req_nps_mode)
+{
+ struct amdgpu_device *tmp_adev;
+ int cur_nps_mode, r;
+
+ /* This is expected to be called only during driver unload. The
+ * request needs to be placed only once for all devices in the hive. If
+ * one of them fails, revert the request for the previously successful
+ * devices. After placing the request, set the hive mode to UNKNOWN so
+ * that other devices don't place the request anymore.
+ */
+ mutex_lock(&hive->hive_lock);
+ if (atomic_read(&hive->requested_nps_mode) ==
+ UNKNOWN_MEMORY_PARTITION_MODE) {
+ dev_dbg(adev->dev, "Unexpected entry for hive NPS change");
+ mutex_unlock(&hive->hive_lock);
+ return 0;
+ }
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+ r = adev->gmc.gmc_funcs->request_mem_partition_mode(
+ tmp_adev, req_nps_mode);
+ if (r)
+ break;
+ }
+ if (r) {
+ /* Request back current mode if one of the requests failed */
+ cur_nps_mode =
+ adev->gmc.gmc_funcs->query_mem_partition_mode(tmp_adev);
+ list_for_each_entry_continue_reverse(
+ tmp_adev, &hive->device_list, gmc.xgmi.head)
+ adev->gmc.gmc_funcs->request_mem_partition_mode(
+ tmp_adev, cur_nps_mode);
+ }
+ /* Set to UNKNOWN so that other devices don't request anymore */
+ atomic_set(&hive->requested_nps_mode, UNKNOWN_MEMORY_PARTITION_MODE);
+ mutex_unlock(&hive->hive_lock);
+
+ return r;
+}
+
+bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
+ struct amdgpu_device *bo_adev)
+{
+ return (amdgpu_use_xgmi_p2p && adev != bo_adev &&
+ adev->gmc.xgmi.hive_id &&
+ adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
+}
+
+void amdgpu_xgmi_early_init(struct amdgpu_device *adev)
+{
+ if (!adev->gmc.xgmi.supported)
+ return;
+
+ switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ /* 25 GT/s */
+ adev->gmc.xgmi.max_speed = 25;
+ adev->gmc.xgmi.max_width = 16;
+ break;
+ case IP_VERSION(9, 4, 3):
+ case IP_VERSION(9, 4, 4):
+ case IP_VERSION(9, 5, 0):
+ /* 32 GT/s */
+ adev->gmc.xgmi.max_speed = 32;
+ adev->gmc.xgmi.max_width = 16;
+ break;
+ default:
+ break;
+ }
+}
+
+void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
+ uint16_t max_speed, uint8_t max_width)
+{
+ adev->gmc.xgmi.max_speed = max_speed;
+ adev->gmc.xgmi.max_width = max_width;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
new file mode 100644
index 000000000000..5f36aff17e79
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_XGMI_H__
+#define __AMDGPU_XGMI_H__
+
+#include <drm/task_barrier.h>
+#include "amdgpu_ras.h"
+
+struct amdgpu_hive_info {
+ struct kobject kobj;
+ uint64_t hive_id;
+ struct list_head device_list;
+ struct list_head node;
+ atomic_t number_devices;
+ struct mutex hive_lock;
+ int hi_req_count;
+ struct amdgpu_device *hi_req_gpu;
+ struct task_barrier tb;
+ enum {
+ AMDGPU_XGMI_PSTATE_MIN,
+ AMDGPU_XGMI_PSTATE_MAX_VEGA20,
+ AMDGPU_XGMI_PSTATE_UNKNOWN
+ } pstate;
+
+ struct amdgpu_reset_domain *reset_domain;
+ atomic_t ras_recovery;
+ struct ras_event_manager event_mgr;
+ struct work_struct reset_on_init_work;
+ atomic_t requested_nps_mode;
+};
+
+struct amdgpu_pcs_ras_field {
+ const char *err_name;
+ uint32_t pcs_err_mask;
+ uint32_t pcs_err_shift;
+};
+
+/**
+ * Bandwidth range reporting comes in two modes.
+ *
+ * PER_LINK - range for any single xgmi link
+ * PER_PEER - range from the max of a single xgmi link to the max across all links to the source peer
+ */
+enum amdgpu_xgmi_bw_mode {
+ AMDGPU_XGMI_BW_MODE_PER_LINK = 0,
+ AMDGPU_XGMI_BW_MODE_PER_PEER
+};
+
+enum amdgpu_xgmi_bw_unit {
+ AMDGPU_XGMI_BW_UNIT_GBYTES = 0,
+ AMDGPU_XGMI_BW_UNIT_MBYTES
+};
+
+struct amdgpu_xgmi_ras {
+ struct amdgpu_ras_block_object ras_block;
+};
+extern struct amdgpu_xgmi_ras xgmi_ras;
+
+struct amdgpu_xgmi {
+ /* from psp */
+ u64 node_id;
+ u64 hive_id;
+ /* fixed per family */
+ u64 node_segment_size;
+ /* physical node (0-3) */
+ unsigned physical_node_id;
+ /* number of nodes (0-4) */
+ unsigned num_physical_nodes;
+ /* gpu list in the same hive */
+ struct list_head head;
+ bool supported;
+ struct ras_common_if *ras_if;
+ bool connected_to_cpu;
+ struct amdgpu_xgmi_ras *ras;
+ uint16_t max_speed;
+ uint8_t max_width;
+};
+
+struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev);
+void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive);
+int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
+int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
+int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
+int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, struct amdgpu_device *peer_adev);
+int amdgpu_xgmi_get_bandwidth(struct amdgpu_device *adev, struct amdgpu_device *peer_adev,
+ enum amdgpu_xgmi_bw_mode bw_mode, enum amdgpu_xgmi_bw_unit bw_unit,
+ uint32_t *min_bw, uint32_t *max_bw);
+bool amdgpu_xgmi_get_is_sharing_enabled(struct amdgpu_device *adev,
+ struct amdgpu_device *peer_adev);
+uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
+ uint64_t addr);
+bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
+ struct amdgpu_device *bo_adev);
+int amdgpu_xgmi_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_xgmi_reset_on_init(struct amdgpu_device *adev);
+
+int amdgpu_xgmi_request_nps_change(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive,
+ int req_nps_mode);
+int amdgpu_get_xgmi_link_status(struct amdgpu_device *adev,
+ int global_link_num);
+int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num);
+
+void amdgpu_xgmi_early_init(struct amdgpu_device *adev);
+uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev);
+
+void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev,
+ uint16_t max_speed, uint8_t max_width);
+
+/* Cleanup macro for use with __free(xgmi_put_hive) */
+DEFINE_FREE(xgmi_put_hive, struct amdgpu_hive_info *, if (_T) amdgpu_put_xgmi_hive(_T))
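+/* Illustrative use with the cleanup attribute:
+ *   struct amdgpu_hive_info *hive __free(xgmi_put_hive) = amdgpu_get_xgmi_hive(adev);
+ * drops the hive reference automatically when 'hive' goes out of scope.
+ */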
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
new file mode 100644
index 000000000000..3a79ed7d8031
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -0,0 +1,456 @@
+/*
+ * Copyright (c) 2018-2021 Advanced Micro Devices, Inc. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#ifndef AMDGV_SRIOV_MSG__H_
+#define AMDGV_SRIOV_MSG__H_
+
+/* unit in kilobytes */
+#define AMD_SRIOV_MSG_VBIOS_OFFSET 0
+#define AMD_SRIOV_MSG_VBIOS_SIZE_KB 64
+#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB AMD_SRIOV_MSG_VBIOS_SIZE_KB
+#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB 4
+#define AMD_SRIOV_MSG_TMR_OFFSET_KB 2048
+#define AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB 2
+#define AMD_SRIOV_RAS_TELEMETRY_SIZE_KB 64
+/*
+ * layout
+ * 0           64KB        65KB        66KB           68KB                   132KB
+ * |   VBIOS   |   PF2VF   |   VF2PF   |   Bad Page   | RAS Telemetry Region | ...
+ * |   64KB    |   1KB     |   1KB     |   2KB        |         64KB         | ...
+ */
+
+#define AMD_SRIOV_MSG_SIZE_KB 1
+#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB
+#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_RAS_TELEMETRY_OFFSET_KB (AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB + AMD_SRIOV_MSG_BAD_PAGE_SIZE_KB)
+
+/*
+ * PF2VF history log:
+ * v1 defined in amdgim
+ * v2 current
+ *
+ * VF2PF history log:
+ * v1 defined in amdgim
+ * v2 defined in amdgim
+ * v3 current
+ */
+#define AMD_SRIOV_MSG_FW_VRAM_PF2VF_VER 2
+#define AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER 3
+
+#define AMD_SRIOV_MSG_RESERVE_UCODE 24
+
+#define AMD_SRIOV_MSG_RESERVE_VCN_INST 4
+
+enum amd_sriov_ucode_engine_id {
+ AMD_SRIOV_UCODE_ID_VCE = 0,
+ AMD_SRIOV_UCODE_ID_UVD,
+ AMD_SRIOV_UCODE_ID_MC,
+ AMD_SRIOV_UCODE_ID_ME,
+ AMD_SRIOV_UCODE_ID_PFP,
+ AMD_SRIOV_UCODE_ID_CE,
+ AMD_SRIOV_UCODE_ID_RLC,
+ AMD_SRIOV_UCODE_ID_RLC_SRLC,
+ AMD_SRIOV_UCODE_ID_RLC_SRLG,
+ AMD_SRIOV_UCODE_ID_RLC_SRLS,
+ AMD_SRIOV_UCODE_ID_MEC,
+ AMD_SRIOV_UCODE_ID_MEC2,
+ AMD_SRIOV_UCODE_ID_SOS,
+ AMD_SRIOV_UCODE_ID_ASD,
+ AMD_SRIOV_UCODE_ID_TA_RAS,
+ AMD_SRIOV_UCODE_ID_TA_XGMI,
+ AMD_SRIOV_UCODE_ID_SMC,
+ AMD_SRIOV_UCODE_ID_SDMA,
+ AMD_SRIOV_UCODE_ID_SDMA2,
+ AMD_SRIOV_UCODE_ID_VCN,
+ AMD_SRIOV_UCODE_ID_DMCU,
+ AMD_SRIOV_UCODE_ID__MAX
+};
+
+#pragma pack(push, 1) // PF2VF / VF2PF data areas are byte packed
+
+union amd_sriov_msg_feature_flags {
+ struct {
+ uint32_t error_log_collect : 1;
+ uint32_t host_load_ucodes : 1;
+ uint32_t host_flr_vramlost : 1;
+ uint32_t mm_bw_management : 1;
+ uint32_t pp_one_vf_mode : 1;
+ uint32_t reg_indirect_acc : 1;
+ uint32_t av1_support : 1;
+ uint32_t vcn_rb_decouple : 1;
+ uint32_t mes_info_dump_enable : 1;
+ uint32_t ras_caps : 1;
+ uint32_t ras_telemetry : 1;
+ uint32_t ras_cper : 1;
+ uint32_t reserved : 20;
+ } flags;
+ uint32_t all;
+};
+
+union amd_sriov_reg_access_flags {
+ struct {
+ uint32_t vf_reg_access_ih : 1;
+ uint32_t vf_reg_access_mmhub : 1;
+ uint32_t vf_reg_access_gc : 1;
+ uint32_t vf_reg_access_l1_tlb_cntl : 1;
+ uint32_t vf_reg_access_sq_config : 1;
+ uint32_t reserved : 27;
+ } flags;
+ uint32_t all;
+};
+
+union amd_sriov_ras_caps {
+ struct {
+ uint64_t block_umc : 1;
+ uint64_t block_sdma : 1;
+ uint64_t block_gfx : 1;
+ uint64_t block_mmhub : 1;
+ uint64_t block_athub : 1;
+ uint64_t block_pcie_bif : 1;
+ uint64_t block_hdp : 1;
+ uint64_t block_xgmi_wafl : 1;
+ uint64_t block_df : 1;
+ uint64_t block_smn : 1;
+ uint64_t block_sem : 1;
+ uint64_t block_mp0 : 1;
+ uint64_t block_mp1 : 1;
+ uint64_t block_fuse : 1;
+ uint64_t block_mca : 1;
+ uint64_t block_vcn : 1;
+ uint64_t block_jpeg : 1;
+ uint64_t block_ih : 1;
+ uint64_t block_mpio : 1;
+ uint64_t poison_propogation_mode : 1;
+ uint64_t reserved : 44;
+ } bits;
+ uint64_t all;
+};
+
+union amd_sriov_msg_os_info {
+ struct {
+ uint32_t windows : 1;
+ uint32_t reserved : 31;
+ } info;
+ uint32_t all;
+};
+
+struct amd_sriov_msg_uuid_info {
+ union {
+ struct {
+ uint32_t did : 16;
+ uint32_t fcn : 8;
+ uint32_t asic_7 : 8;
+ };
+ uint32_t time_low;
+ };
+
+ struct {
+ uint32_t time_mid : 16;
+ uint32_t time_high : 12;
+ uint32_t version : 4;
+ };
+
+ struct {
+ struct {
+ uint8_t clk_seq_hi : 6;
+ uint8_t variant : 2;
+ };
+ union {
+ uint8_t clk_seq_low;
+ uint8_t asic_6;
+ };
+ uint16_t asic_4;
+ };
+
+ uint32_t asic_0;
+};
+
+struct amd_sriov_msg_pf2vf_info_header {
+ /* the total structure size in byte */
+ uint32_t size;
+ /* version of this structure, written by the HOST */
+ uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
+};
+
+#define AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE (55)
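+/* the reserved[] tail below is sized so the struct stays exactly 256 dwords (1 KB) */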
+struct amd_sriov_msg_pf2vf_info {
+ /* header contains size and version */
+ struct amd_sriov_msg_pf2vf_info_header header;
+ /* use private key from mailbox 2 to create checksum */
+ uint32_t checksum;
+ /* The features flags of the HOST driver supports */
+ union amd_sriov_msg_feature_flags feature_flags;
+ /* (max_width * max_height * fps) / (16 * 16) */
+ uint32_t hevc_enc_max_mb_per_second;
+ /* (max_width * max_height) / (16 * 16) */
+ uint32_t hevc_enc_max_mb_per_frame;
+ /* (max_width * max_height * fps) / (16 * 16) */
+ uint32_t avc_enc_max_mb_per_second;
+ /* (max_width * max_height) / (16 * 16) */
+ uint32_t avc_enc_max_mb_per_frame;
+ /* MEC FW position in BYTE from the start of VF visible frame buffer */
+ uint64_t mecfw_offset;
+ /* MEC FW size in BYTE */
+ uint32_t mecfw_size;
+ /* UVD FW position in BYTE from the start of VF visible frame buffer */
+ uint64_t uvdfw_offset;
+ /* UVD FW size in BYTE */
+ uint32_t uvdfw_size;
+ /* VCE FW position in BYTE from the start of VF visible frame buffer */
+ uint64_t vcefw_offset;
+ /* VCE FW size in BYTE */
+ uint32_t vcefw_size;
+ /* Bad pages block position in BYTE */
+ uint32_t bp_block_offset_low;
+ uint32_t bp_block_offset_high;
+ /* Bad pages block size in BYTE */
+ uint32_t bp_block_size;
+ /* frequency for VF to update the VF2PF area in msec, 0 = manual */
+ uint32_t vf2pf_update_interval_ms;
+ /* identification in ROCm SMI */
+ uint64_t uuid;
+ uint32_t fcn_idx;
+ /* flags to indicate which register access method VF should use */
+ union amd_sriov_reg_access_flags reg_access_flags;
+ /* MM BW management */
+ struct {
+ uint32_t decode_max_dimension_pixels;
+ uint32_t decode_max_frame_pixels;
+ uint32_t encode_max_dimension_pixels;
+ uint32_t encode_max_frame_pixels;
+ } mm_bw_management[AMD_SRIOV_MSG_RESERVE_VCN_INST];
+ /* UUID info */
+ struct amd_sriov_msg_uuid_info uuid_info;
+ /* PCIE atomic ops support flag */
+ uint32_t pcie_atomic_ops_support_flags;
+ /* Portion of GPU memory occupied by VF. MAX value is 65535, but set to uint32_t to maintain alignment with reserved size */
+ uint32_t gpu_capacity;
+ /* vf bdf on host pci tree for debug only */
+ uint32_t bdf_on_host;
+ uint32_t more_bp; //Reserved for future use.
+ union amd_sriov_ras_caps ras_en_caps;
+ union amd_sriov_ras_caps ras_telemetry_en_caps;
+
+ /* reserved */
+ uint32_t reserved[256 - AMD_SRIOV_MSG_PF2VF_INFO_FILLED_SIZE];
+} __packed;
+
+struct amd_sriov_msg_vf2pf_info_header {
+ /* the total structure size in byte */
+ uint32_t size;
+ /* version of this structure, written by the guest */
+ uint32_t version;
+ /* reserved */
+ uint32_t reserved[2];
+};
+
+#define AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE (73)
+struct amd_sriov_msg_vf2pf_info {
+ /* header contains size and version */
+ struct amd_sriov_msg_vf2pf_info_header header;
+ uint32_t checksum;
+ /* driver version */
+ uint8_t driver_version[64];
+ /* driver certification, 1=WHQL, 0=None */
+ uint32_t driver_cert;
+ /* guest OS type and version */
+ union amd_sriov_msg_os_info os_info;
+ /* guest fb information in the unit of MB */
+ uint32_t fb_usage;
+ /* guest gfx engine usage percentage */
+ uint32_t gfx_usage;
+ /* guest gfx engine health percentage */
+ uint32_t gfx_health;
+ /* guest compute engine usage percentage */
+ uint32_t compute_usage;
+ /* guest compute engine health percentage */
+ uint32_t compute_health;
+ /* guest avc engine usage percentage. 0xffff means N/A */
+ uint32_t avc_enc_usage;
+ /* guest avc engine health percentage. 0xffff means N/A */
+ uint32_t avc_enc_health;
+ /* guest hevc engine usage percentage. 0xffff means N/A */
+ uint32_t hevc_enc_usage;
+ /* guest hevc engine health percentage. 0xffff means N/A */
+ uint32_t hevc_enc_health;
+ /* combined encode/decode usage */
+ uint32_t encode_usage;
+ uint32_t decode_usage;
+ /* Version of PF2VF that VF understands */
+ uint32_t pf2vf_version_required;
+ /* additional FB usage */
+ uint32_t fb_vis_usage;
+ uint32_t fb_vis_size;
+ uint32_t fb_size;
+ /* guest ucode data, each one is 1.25 Dword */
+ struct {
+ uint8_t id;
+ uint32_t version;
+ } ucode_info[AMD_SRIOV_MSG_RESERVE_UCODE];
+ uint64_t dummy_page_addr;
+ /* FB allocated for guest MES to record UQ info */
+ uint64_t mes_info_addr;
+ uint32_t mes_info_size;
+ /* reserved */
+ uint32_t reserved[256 - AMD_SRIOV_MSG_VF2PF_INFO_FILLED_SIZE];
+} __packed;
+
+/* mailbox message send from guest to host */
+enum amd_sriov_mailbox_request_message {
+ MB_REQ_MSG_REQ_GPU_INIT_ACCESS = 1,
+ MB_REQ_MSG_REL_GPU_INIT_ACCESS,
+ MB_REQ_MSG_REQ_GPU_FINI_ACCESS,
+ MB_REQ_MSG_REL_GPU_FINI_ACCESS,
+ MB_REQ_MSG_REQ_GPU_RESET_ACCESS,
+ MB_REQ_MSG_REQ_GPU_INIT_DATA,
+ MB_REQ_MSG_PSP_VF_CMD_RELAY,
+
+ MB_REQ_MSG_LOG_VF_ERROR = 200,
+ MB_REQ_MSG_READY_TO_RESET = 201,
+ MB_REQ_MSG_RAS_POISON = 202,
+ MB_REQ_RAS_ERROR_COUNT = 203,
+ MB_REQ_RAS_CPER_DUMP = 204,
+ MB_REQ_RAS_BAD_PAGES = 205,
+};
+
+/* mailbox message send from host to guest */
+enum amd_sriov_mailbox_response_message {
+ MB_RES_MSG_CLR_MSG_BUF = 0,
+ MB_RES_MSG_READY_TO_ACCESS_GPU = 1,
+ MB_RES_MSG_FLR_NOTIFICATION = 2,
+ MB_RES_MSG_FLR_NOTIFICATION_COMPLETION = 3,
+ MB_RES_MSG_SUCCESS = 4,
+ MB_RES_MSG_FAIL = 5,
+ MB_RES_MSG_QUERY_ALIVE = 6,
+ MB_RES_MSG_GPU_INIT_DATA_READY = 7,
+ MB_RES_MSG_RAS_POISON_READY = 8,
+ MB_RES_MSG_PF_SOFT_FLR_NOTIFICATION = 9,
+ MB_RES_MSG_GPU_RMA = 10,
+ MB_RES_MSG_RAS_ERROR_COUNT_READY = 11,
+ MB_REQ_RAS_CPER_DUMP_READY = 14,
+ MB_RES_MSG_RAS_BAD_PAGES_READY = 15,
+ MB_RES_MSG_RAS_BAD_PAGES_NOTIFICATION = 16,
+ MB_RES_MSG_UNRECOV_ERR_NOTIFICATION = 17,
+ MB_RES_MSG_TEXT_MESSAGE = 255
+};
+
+enum amd_sriov_ras_telemetry_gpu_block {
+ RAS_TELEMETRY_GPU_BLOCK_UMC = 0,
+ RAS_TELEMETRY_GPU_BLOCK_SDMA = 1,
+ RAS_TELEMETRY_GPU_BLOCK_GFX = 2,
+ RAS_TELEMETRY_GPU_BLOCK_MMHUB = 3,
+ RAS_TELEMETRY_GPU_BLOCK_ATHUB = 4,
+ RAS_TELEMETRY_GPU_BLOCK_PCIE_BIF = 5,
+ RAS_TELEMETRY_GPU_BLOCK_HDP = 6,
+ RAS_TELEMETRY_GPU_BLOCK_XGMI_WAFL = 7,
+ RAS_TELEMETRY_GPU_BLOCK_DF = 8,
+ RAS_TELEMETRY_GPU_BLOCK_SMN = 9,
+ RAS_TELEMETRY_GPU_BLOCK_SEM = 10,
+ RAS_TELEMETRY_GPU_BLOCK_MP0 = 11,
+ RAS_TELEMETRY_GPU_BLOCK_MP1 = 12,
+ RAS_TELEMETRY_GPU_BLOCK_FUSE = 13,
+ RAS_TELEMETRY_GPU_BLOCK_MCA = 14,
+ RAS_TELEMETRY_GPU_BLOCK_VCN = 15,
+ RAS_TELEMETRY_GPU_BLOCK_JPEG = 16,
+ RAS_TELEMETRY_GPU_BLOCK_IH = 17,
+ RAS_TELEMETRY_GPU_BLOCK_MPIO = 18,
+ RAS_TELEMETRY_GPU_BLOCK_COUNT = 19,
+};
+
+struct amd_sriov_ras_telemetry_header {
+ uint32_t checksum;
+ uint32_t used_size;
+ uint32_t reserved[2];
+};
+
+struct amd_sriov_ras_telemetry_error_count {
+ struct {
+ uint32_t ce_count;
+ uint32_t ue_count;
+ uint32_t de_count;
+ uint32_t ce_overflow_count;
+ uint32_t ue_overflow_count;
+ uint32_t de_overflow_count;
+ uint32_t reserved[6];
+ } block[RAS_TELEMETRY_GPU_BLOCK_COUNT];
+};
+
+struct amd_sriov_ras_cper_dump {
+ uint32_t more;
+ uint64_t overflow_count;
+ uint64_t count;
+ uint64_t wptr;
+ uint32_t buf[];
+};
+
+struct amd_sriov_ras_chk_criti {
+ uint32_t hit;
+};
+
+struct amdsriov_ras_telemetry {
+ struct amd_sriov_ras_telemetry_header header;
+
+ union {
+ struct amd_sriov_ras_telemetry_error_count error_count;
+ struct amd_sriov_ras_cper_dump cper_dump;
+ struct amd_sriov_ras_chk_criti chk_criti;
+ } body;
+};
+
+/* version data stored in MAILBOX_MSGBUF_RCV_DW1 for future expansion */
+enum amd_sriov_gpu_init_data_version {
+ GPU_INIT_DATA_READY_V1 = 1,
+};
+
+#pragma pack(pop) // Restore previous packing option
+
+/* checksum function between host and guest */
+unsigned int amd_sriov_msg_checksum(void *obj, unsigned long obj_size, unsigned int key,
+ unsigned int checksum);
+
+/* assertion at compile time */
+#ifdef __linux__
+#define stringification(s) _stringification(s)
+#define _stringification(s) #s
+
+_Static_assert(
+ sizeof(struct amd_sriov_msg_vf2pf_info) == AMD_SRIOV_MSG_SIZE_KB << 10,
+ "amd_sriov_msg_vf2pf_info must be " stringification(AMD_SRIOV_MSG_SIZE_KB) " KB");
+
+_Static_assert(
+ sizeof(struct amd_sriov_msg_pf2vf_info) == AMD_SRIOV_MSG_SIZE_KB << 10,
+ "amd_sriov_msg_pf2vf_info must be " stringification(AMD_SRIOV_MSG_SIZE_KB) " KB");
+
+_Static_assert(AMD_SRIOV_MSG_RESERVE_UCODE % 4 == 0,
+ "AMD_SRIOV_MSG_RESERVE_UCODE must be multiple of 4");
+
+_Static_assert(AMD_SRIOV_MSG_RESERVE_UCODE > AMD_SRIOV_UCODE_ID__MAX,
+ "AMD_SRIOV_MSG_RESERVE_UCODE must be bigger than AMD_SRIOV_UCODE_ID__MAX");
+
+#undef _stringification
+#undef stringification
+#endif
+
+#endif /* AMDGV_SRIOV_MSG__H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
new file mode 100644
index 000000000000..811124ff88a8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -0,0 +1,985 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "amdgpu_reg_state.h"
+#include "amdgpu_xcp.h"
+#include "gfx_v9_4_3.h"
+#include "gfxhub_v1_2.h"
+#include "sdma_v4_4_2.h"
+#include "amdgpu_ip.h"
+
+#define XCP_INST_MASK(num_inst, xcp_id) \
+ (num_inst ? GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0)
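+/* e.g. XCP_INST_MASK(2, 1) = GENMASK(1, 0) << 2 = 0xc, i.e. instances 2 and 3 */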
+
+void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
+{
+ int i;
+
+ adev->doorbell_index.kiq = AMDGPU_DOORBELL_LAYOUT1_KIQ_START;
+
+ adev->doorbell_index.mec_ring0 = AMDGPU_DOORBELL_LAYOUT1_MEC_RING_START;
+
+ adev->doorbell_index.userqueue_start = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_START;
+ adev->doorbell_index.userqueue_end = AMDGPU_DOORBELL_LAYOUT1_USERQUEUE_END;
+ adev->doorbell_index.xcc_doorbell_range = AMDGPU_DOORBELL_LAYOUT1_XCC_RANGE;
+
+ adev->doorbell_index.sdma_doorbell_range = 20;
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ adev->doorbell_index.sdma_engine[i] =
+ AMDGPU_DOORBELL_LAYOUT1_sDMA_ENGINE_START +
+ i * (adev->doorbell_index.sdma_doorbell_range >> 1);
+
+ adev->doorbell_index.ih = AMDGPU_DOORBELL_LAYOUT1_IH;
+ adev->doorbell_index.vcn.vcn_ring0_1 = AMDGPU_DOORBELL_LAYOUT1_VCN_START;
+
+ adev->doorbell_index.first_non_cp = AMDGPU_DOORBELL_LAYOUT1_FIRST_NON_CP;
+ adev->doorbell_index.last_non_cp = AMDGPU_DOORBELL_LAYOUT1_LAST_NON_CP;
+
+ adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1;
+}
+
+/* Fixed pattern for smn addressing on different AIDs:
+ * bit[34]: indicates a cross-AID access
+ * bits[33:32]: indicate the target AID id
+ * The AID id range is 0 ~ 3 as the maximum number of AIDs is 4.
+ */
+u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
+{
+ u64 ext_offset;
+
+ /* local routing and bit[34:32] will be zeros */
+ if (ext_id == 0)
+ return 0;
+
+ /* Initiated from the host, accesses to all non-zero AIDs are cross traffic */
+ ext_offset = ((u64)(ext_id & 0x3) << 32) | (1ULL << 34);
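+ /* e.g. ext_id = 2 -> (2ULL << 32) | (1ULL << 34) = 0x600000000 */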
+
+ return ext_offset;
+}
+
+static enum amdgpu_gfx_partition
+__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ int num_xcc, num_xcc_per_xcp = 0, mode = 0;
+
+ num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
+ if (adev->gfx.funcs->get_xccs_per_xcp)
+ num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
+ if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
+ mode = num_xcc / num_xcc_per_xcp;
+
+ if (num_xcc_per_xcp == 1)
+ return AMDGPU_CPX_PARTITION_MODE;
+
+ switch (mode) {
+ case 1:
+ return AMDGPU_SPX_PARTITION_MODE;
+ case 2:
+ return AMDGPU_DPX_PARTITION_MODE;
+ case 3:
+ return AMDGPU_TPX_PARTITION_MODE;
+ case 4:
+ return AMDGPU_QPX_PARTITION_MODE;
+ default:
+ return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+ }
+
+ return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+}
+
+static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ enum amdgpu_gfx_partition derv_mode,
+ mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+ struct amdgpu_device *adev = xcp_mgr->adev;
+
+ derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);
+
+ if (amdgpu_sriov_vf(adev))
+ return derv_mode;
+
+ if (adev->nbio.funcs->get_compute_partition_mode) {
+ mode = adev->nbio.funcs->get_compute_partition_mode(adev);
+ if (mode != derv_mode) {
+ dev_warn(
+ adev->dev,
+ "Mismatch in compute partition mode - reported : %d derived : %d",
+ mode, derv_mode);
+ if (derv_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
+ amdgpu_device_bus_status_check(adev);
+ }
+ }
+
+ return mode;
+}
+
+static int __aqua_vanjaram_get_xcc_per_xcp(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
+{
+ int num_xcc, num_xcc_per_xcp = 0;
+
+ num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
+
+ switch (mode) {
+ case AMDGPU_SPX_PARTITION_MODE:
+ num_xcc_per_xcp = num_xcc;
+ break;
+ case AMDGPU_DPX_PARTITION_MODE:
+ num_xcc_per_xcp = num_xcc / 2;
+ break;
+ case AMDGPU_TPX_PARTITION_MODE:
+ num_xcc_per_xcp = num_xcc / 3;
+ break;
+ case AMDGPU_QPX_PARTITION_MODE:
+ num_xcc_per_xcp = num_xcc / 4;
+ break;
+ case AMDGPU_CPX_PARTITION_MODE:
+ num_xcc_per_xcp = 1;
+ break;
+ }
+
+ return num_xcc_per_xcp;
+}
+
+static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
+ enum AMDGPU_XCP_IP_BLOCK ip_id,
+ struct amdgpu_xcp_ip *ip)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ int num_sdma, num_vcn, num_shared_vcn, num_xcp;
+ int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
+
+ num_sdma = adev->sdma.num_instances;
+ num_vcn = adev->vcn.num_vcn_inst;
+ num_shared_vcn = 1;
+
+ num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
+ num_xcp = NUM_XCC(adev->gfx.xcc_mask) / num_xcc_xcp;
+
+ switch (xcp_mgr->mode) {
+ case AMDGPU_SPX_PARTITION_MODE:
+ case AMDGPU_DPX_PARTITION_MODE:
+ case AMDGPU_TPX_PARTITION_MODE:
+ case AMDGPU_QPX_PARTITION_MODE:
+ case AMDGPU_CPX_PARTITION_MODE:
+ num_sdma_xcp = DIV_ROUND_UP(num_sdma, num_xcp);
+ num_vcn_xcp = DIV_ROUND_UP(num_vcn, num_xcp);
+ break;
+ default:
+ return -EINVAL;
+ }
+
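+ /* more partitions than VCN instances: each VCN instance is shared by num_xcp / num_vcn partitions */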
+ if (num_vcn && num_xcp > num_vcn)
+ num_shared_vcn = num_xcp / num_vcn;
+
+ switch (ip_id) {
+ case AMDGPU_XCP_GFXHUB:
+ ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
+ ip->ip_funcs = &gfxhub_v1_2_xcp_funcs;
+ break;
+ case AMDGPU_XCP_GFX:
+ ip->inst_mask = XCP_INST_MASK(num_xcc_xcp, xcp_id);
+ ip->ip_funcs = &gfx_v9_4_3_xcp_funcs;
+ break;
+ case AMDGPU_XCP_SDMA:
+ ip->inst_mask = XCP_INST_MASK(num_sdma_xcp, xcp_id);
+ ip->ip_funcs = &sdma_v4_4_2_xcp_funcs;
+ break;
+ case AMDGPU_XCP_VCN:
+ ip->inst_mask =
+ XCP_INST_MASK(num_vcn_xcp, xcp_id / num_shared_vcn);
+ /* TODO: Assign IP funcs */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ip->ip_id = ip_id;
+
+ return 0;
+}
+
+static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int px_mode, int *num_xcp,
+ uint16_t *nps_modes)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
+
+ if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode)))
+ return -EINVAL;
+
+ switch (px_mode) {
+ case AMDGPU_SPX_PARTITION_MODE:
+ *num_xcp = 1;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE);
+ break;
+ case AMDGPU_DPX_PARTITION_MODE:
+ *num_xcp = 2;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS2_PARTITION_MODE);
+ break;
+ case AMDGPU_TPX_PARTITION_MODE:
+ *num_xcp = 3;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ break;
+ case AMDGPU_QPX_PARTITION_MODE:
+ *num_xcp = 4;
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ if (gc_ver == IP_VERSION(9, 5, 0))
+ *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
+ break;
+ case AMDGPU_CPX_PARTITION_MODE:
+ *num_xcp = NUM_XCC(adev->gfx.xcc_mask);
+ *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) |
+ BIT(AMDGPU_NPS4_PARTITION_MODE);
+ if (gc_ver == IP_VERSION(9, 5, 0))
+ *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode,
+ struct amdgpu_xcp_cfg *xcp_cfg)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ int max_res[AMDGPU_XCP_RES_MAX] = {};
+ bool res_lt_xcp;
+ int num_xcp, i, r;
+ u16 nps_modes;
+
+ if (!(xcp_mgr->supp_xcp_modes & BIT(mode)))
+ return -EINVAL;
+
+ max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
+ max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
+ max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
+ max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;
+
+ r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp, &nps_modes);
+ if (r)
+ return r;
+
+ xcp_cfg->compatible_nps_modes =
+ (adev->gmc.supported_nps_modes & nps_modes);
+ xcp_cfg->num_res = ARRAY_SIZE(max_res);
+
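+ /* when a resource has fewer instances than partitions, a single instance is shared across num_xcp / max_res partitions */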
+ for (i = 0; i < xcp_cfg->num_res; i++) {
+ res_lt_xcp = max_res[i] < num_xcp;
+ xcp_cfg->xcp_res[i].id = i;
+ xcp_cfg->xcp_res[i].num_inst =
+ res_lt_xcp ? 1 : max_res[i] / num_xcp;
+ xcp_cfg->xcp_res[i].num_inst =
+ i == AMDGPU_XCP_RES_JPEG ?
+ xcp_cfg->xcp_res[i].num_inst *
+ adev->jpeg.num_jpeg_rings : xcp_cfg->xcp_res[i].num_inst;
+ xcp_cfg->xcp_res[i].num_shared =
+ res_lt_xcp ? num_xcp / max_res[i] : 1;
+ }
+
+ return 0;
+}
+
+static enum amdgpu_gfx_partition
+__aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ int num_xcc;
+
+ num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
+
+ if (adev->gmc.num_mem_partitions == 1)
+ return AMDGPU_SPX_PARTITION_MODE;
+
+ if (adev->gmc.num_mem_partitions == num_xcc)
+ return AMDGPU_CPX_PARTITION_MODE;
+
+ if (adev->gmc.num_mem_partitions == num_xcc / 2)
+ return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
+ AMDGPU_CPX_PARTITION_MODE;
+
+ if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
+ return AMDGPU_DPX_PARTITION_MODE;
+
+ return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+}
+
+static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr,
+ enum amdgpu_gfx_partition mode)
+{
+ struct amdgpu_device *adev = xcp_mgr->adev;
+ int num_xcc, num_xccs_per_xcp, r;
+ int num_xcp, nps_mode;
+ u16 supp_nps_modes;
+ bool comp_mode;
+
+ nps_mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ r = __aqua_vanjaram_get_px_mode_info(xcp_mgr, mode, &num_xcp,
+ &supp_nps_modes);
+ if (r)
+ return false;
+
+ comp_mode = !!(BIT(nps_mode) & supp_nps_modes);
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+ switch (mode) {
+ case AMDGPU_SPX_PARTITION_MODE:
+ return comp_mode && num_xcc > 0;
+ case AMDGPU_DPX_PARTITION_MODE:
+ return comp_mode && (num_xcc % 4) == 0;
+ case AMDGPU_TPX_PARTITION_MODE:
+ return comp_mode && ((num_xcc % 3) == 0);
+ case AMDGPU_QPX_PARTITION_MODE:
+ num_xccs_per_xcp = num_xcc / 4;
+ return comp_mode && (num_xccs_per_xcp >= 2);
+ case AMDGPU_CPX_PARTITION_MODE:
+ return comp_mode && (num_xcc > 1);
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+ int mode;
+
+ xcp_mgr->avail_xcp_modes = 0;
+
+ for_each_inst(mode, xcp_mgr->supp_xcp_modes) {
+ if (__aqua_vanjaram_is_valid_mode(xcp_mgr, mode))
+ xcp_mgr->avail_xcp_modes |= BIT(mode);
+ }
+}
+
+static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
+ int mode, int *num_xcps)
+{
+ int num_xcc_per_xcp, num_xcc, ret;
+ struct amdgpu_device *adev;
+ u32 flags = 0;
+
+ adev = xcp_mgr->adev;
+ num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+
+ if (mode == AMDGPU_AUTO_COMPUTE_PARTITION_MODE) {
+ mode = __aqua_vanjaram_get_auto_mode(xcp_mgr);
+ if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) {
+ dev_err(adev->dev,
+ "Invalid config, no compatible compute partition mode found, available memory partitions: %d",
+ adev->gmc.num_mem_partitions);
+ return -EINVAL;
+ }
+ } else if (!__aqua_vanjaram_is_valid_mode(xcp_mgr, mode)) {
+ dev_err(adev->dev,
+ "Invalid compute partition mode requested, requested: %s, available memory partitions: %d",
+ amdgpu_gfx_compute_mode_desc(mode), adev->gmc.num_mem_partitions);
+ return -EINVAL;
+ }
+
+ if (adev->kfd.init_complete && !amdgpu_in_reset(adev))
+ flags |= AMDGPU_XCP_OPS_KFD;
+
+ if (flags & AMDGPU_XCP_OPS_KFD) {
+ ret = amdgpu_amdkfd_check_and_lock_kfd(adev);
+ if (ret)
+ goto out;
+ }
+
+ ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags);
+ if (ret)
+ goto unlock;
+
+ num_xcc_per_xcp = __aqua_vanjaram_get_xcc_per_xcp(xcp_mgr, mode);
+ if (adev->gfx.funcs->switch_partition_mode)
+ adev->gfx.funcs->switch_partition_mode(xcp_mgr->adev,
+ num_xcc_per_xcp);
+
+ /* Init info about new xcps */
+ *num_xcps = num_xcc / num_xcc_per_xcp;
+ amdgpu_xcp_init(xcp_mgr, *num_xcps, mode);
+
+ ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags);
+ if (!ret)
+ __aqua_vanjaram_update_available_partition_mode(xcp_mgr);
+unlock:
+ if (flags & AMDGPU_XCP_OPS_KFD)
+ amdgpu_amdkfd_unlock_kfd(adev);
+out:
+ return ret;
+}
+
+static int __aqua_vanjaram_get_xcp_mem_id(struct amdgpu_device *adev,
+ int xcc_id, uint8_t *mem_id)
+{
+ /* memory/spatial modes validation check is already done */
+ *mem_id = xcc_id / adev->gfx.num_xcc_per_xcp;
+ *mem_id /= adev->xcp_mgr->num_xcp_per_mem_partition;
+
+ return 0;
+}
+
+static int aqua_vanjaram_get_xcp_mem_id(struct amdgpu_xcp_mgr *xcp_mgr,
+ struct amdgpu_xcp *xcp, uint8_t *mem_id)
+{
+ struct amdgpu_numa_info numa_info;
+ struct amdgpu_device *adev;
+ uint32_t xcc_mask;
+ int r, i, xcc_id;
+
+ adev = xcp_mgr->adev;
+ /* TODO: BIOS is not returning the right info now
+ * Check on this later
+ */
+ /*
+ if (adev->gmc.gmc_funcs->query_mem_partition_mode)
+ mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+ */
+ if (adev->gmc.num_mem_partitions == 1) {
+ /* Only one range */
+ *mem_id = 0;
+ return 0;
+ }
+
+ r = amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_GFX, &xcc_mask);
+ if (r || !xcc_mask)
+ return -EINVAL;
+
+ xcc_id = ffs(xcc_mask) - 1;
+ if (!adev->gmc.is_app_apu)
+ return __aqua_vanjaram_get_xcp_mem_id(adev, xcc_id, mem_id);
+
+ r = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info);
+
+ if (r)
+ return r;
+
+ r = -EINVAL;
+ for (i = 0; i < adev->gmc.num_mem_partitions; ++i) {
+ if (adev->gmc.mem_partitions[i].numa.node == numa_info.nid) {
+ *mem_id = i;
+ r = 0;
+ break;
+ }
+ }
+
+ return r;
+}
+
+static int aqua_vanjaram_get_xcp_ip_details(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
+ enum AMDGPU_XCP_IP_BLOCK ip_id,
+ struct amdgpu_xcp_ip *ip)
+{
+ if (!ip)
+ return -EINVAL;
+
+ return __aqua_vanjaram_get_xcp_ip_info(xcp_mgr, xcp_id, ip_id, ip);
+}
+
+struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = {
+ .switch_partition_mode = &aqua_vanjaram_switch_partition_mode,
+ .query_partition_mode = &aqua_vanjaram_query_partition_mode,
+ .get_ip_details = &aqua_vanjaram_get_xcp_ip_details,
+ .get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info,
+ .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id,
+};
+
+static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
+{
+ int ret;
+
+ if (amdgpu_sriov_vf(adev))
+ aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;
+
+ ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
+ &aqua_vanjaram_xcp_funcs);
+ if (ret)
+ return ret;
+
+ amdgpu_xcp_update_supported_modes(adev->xcp_mgr);
+ /* TODO: Default memory node affinity init */
+
+ return ret;
+}
+
+int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev)
+{
+ u32 mask, avail_inst, inst_mask = adev->sdma.sdma_mask;
+ int ret, i;
+
+ /* generally 1 AID supports 4 instances */
+ adev->sdma.num_inst_per_aid = 4;
+ adev->sdma.num_instances = NUM_SDMA(adev->sdma.sdma_mask);
+
+ adev->aid_mask = i = 1;
+ inst_mask >>= adev->sdma.num_inst_per_aid;
+
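+	/* Mark an AID present when all of its SDMA instances, or a
+	 * contiguous half of them, are available.
+	 */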
+ for (mask = (1 << adev->sdma.num_inst_per_aid) - 1; inst_mask;
+ inst_mask >>= adev->sdma.num_inst_per_aid, ++i) {
+ avail_inst = inst_mask & mask;
+ if (avail_inst == mask || avail_inst == 0x3 ||
+ avail_inst == 0xc)
+ adev->aid_mask |= (1 << i);
+ }
+
+ /* Harvest config is not used for aqua vanjaram. VCN and JPEGs will be
+ * addressed based on logical instance ids.
+ */
+ adev->vcn.harvest_config = 0;
+ adev->vcn.num_inst_per_aid = 1;
+ adev->vcn.num_vcn_inst = hweight32(adev->vcn.inst_mask);
+ adev->jpeg.harvest_config = 0;
+ adev->jpeg.num_inst_per_aid = 1;
+ adev->jpeg.num_jpeg_inst = hweight32(adev->jpeg.inst_mask);
+
+ ret = aqua_vanjaram_xcp_mgr_init(adev);
+ if (ret)
+ return ret;
+
+ amdgpu_ip_map_init(adev);
+
+ return 0;
+}
+
+static void aqua_read_smn(struct amdgpu_device *adev,
+ struct amdgpu_smn_reg_data *regdata,
+ uint64_t smn_addr)
+{
+ regdata->addr = smn_addr;
+ regdata->value = RREG32_PCIE(smn_addr);
+}
+
+struct aqua_reg_list {
+ uint64_t start_addr;
+ uint32_t num_regs;
+ uint32_t incrx;
+};
+
+#define DW_ADDR_INCR 4
+
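+/* Read an SMN register on AID instance 'i'; the per-AID offset is folded
+ * into the extended address before the PCIE_EXT read.
+ */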
+static void aqua_read_smn_ext(struct amdgpu_device *adev,
+ struct amdgpu_smn_reg_data *regdata,
+ uint64_t smn_addr, int i)
+{
+ regdata->addr =
+ smn_addr + adev->asic_funcs->encode_ext_smn_addressing(i);
+ regdata->value = RREG32_PCIE_EXT(regdata->addr);
+}
+
+#define smnreg_0x1A340218 0x1A340218
+#define smnreg_0x1A3402E4 0x1A3402E4
+#define smnreg_0x1A340294 0x1A340294
+#define smreg_0x1A380088 0x1A380088
+
+#define NUM_PCIE_SMN_REGS 14
+
+static struct aqua_reg_list pcie_reg_addrs[] = {
+ { smnreg_0x1A340218, 1, 0 },
+ { smnreg_0x1A3402E4, 1, 0 },
+ { smnreg_0x1A340294, 6, DW_ADDR_INCR },
+ { smreg_0x1A380088, 6, DW_ADDR_INCR },
+};
+
+static ssize_t aqua_vanjaram_read_pcie_state(struct amdgpu_device *adev,
+ void *buf, size_t max_size)
+{
+ struct amdgpu_reg_state_pcie_v1_0 *pcie_reg_state;
+ uint32_t start_addr, incrx, num_regs, szbuf;
+ struct amdgpu_regs_pcie_v1_0 *pcie_regs;
+ struct amdgpu_smn_reg_data *reg_data;
+ struct pci_dev *us_pdev, *ds_pdev;
+ int aer_cap, r, n;
+
+ if (!buf || !max_size)
+ return -EINVAL;
+
+ pcie_reg_state = (struct amdgpu_reg_state_pcie_v1_0 *)buf;
+
+ szbuf = sizeof(*pcie_reg_state) +
+ amdgpu_reginst_size(1, sizeof(*pcie_regs), NUM_PCIE_SMN_REGS);
+ /* Only one instance of pcie regs */
+ if (max_size < szbuf)
+ return -EOVERFLOW;
+
+ pcie_regs = (struct amdgpu_regs_pcie_v1_0 *)((uint8_t *)buf +
+ sizeof(*pcie_reg_state));
+ pcie_regs->inst_header.instance = 0;
+ pcie_regs->inst_header.state = AMDGPU_INST_S_OK;
+ pcie_regs->inst_header.num_smn_regs = NUM_PCIE_SMN_REGS;
+
+ reg_data = pcie_regs->smn_reg_values;
+
+ for (r = 0; r < ARRAY_SIZE(pcie_reg_addrs); r++) {
+ start_addr = pcie_reg_addrs[r].start_addr;
+ incrx = pcie_reg_addrs[r].incrx;
+ num_regs = pcie_reg_addrs[r].num_regs;
+ for (n = 0; n < num_regs; n++) {
+ aqua_read_smn(adev, reg_data, start_addr + n * incrx);
+ ++reg_data;
+ }
+ }
+
+ ds_pdev = pci_upstream_bridge(adev->pdev);
+ us_pdev = pci_upstream_bridge(ds_pdev);
+
+ pcie_capability_read_word(us_pdev, PCI_EXP_DEVSTA,
+ &pcie_regs->device_status);
+ pcie_capability_read_word(us_pdev, PCI_EXP_LNKSTA,
+ &pcie_regs->link_status);
+
+ aer_cap = pci_find_ext_capability(us_pdev, PCI_EXT_CAP_ID_ERR);
+ if (aer_cap) {
+ pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_COR_STATUS,
+ &pcie_regs->pcie_corr_err_status);
+ pci_read_config_dword(us_pdev, aer_cap + PCI_ERR_UNCOR_STATUS,
+ &pcie_regs->pcie_uncorr_err_status);
+ }
+
+ pci_read_config_dword(us_pdev, PCI_PRIMARY_BUS,
+ &pcie_regs->sub_bus_number_latency);
+
+ pcie_reg_state->common_header.structure_size = szbuf;
+ pcie_reg_state->common_header.format_revision = 1;
+ pcie_reg_state->common_header.content_revision = 0;
+ pcie_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_PCIE;
+ pcie_reg_state->common_header.num_instances = 1;
+
+ return pcie_reg_state->common_header.structure_size;
+}
+
+#define smnreg_0x11A00050 0x11A00050
+#define smnreg_0x11A00180 0x11A00180
+#define smnreg_0x11A00070 0x11A00070
+#define smnreg_0x11A00200 0x11A00200
+#define smnreg_0x11A0020C 0x11A0020C
+#define smnreg_0x11A00210 0x11A00210
+#define smnreg_0x11A00108 0x11A00108
+
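+/* Each XGMI link's SMN register aperture is 0x100000 apart (link index in bits 20+) */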
+#define XGMI_LINK_REG(smnreg, l) ((smnreg) | (l << 20))
+
+#define NUM_XGMI_SMN_REGS 25
+
+static struct aqua_reg_list xgmi_reg_addrs[] = {
+ { smnreg_0x11A00050, 1, 0 },
+ { smnreg_0x11A00180, 16, DW_ADDR_INCR },
+ { smnreg_0x11A00070, 4, DW_ADDR_INCR },
+ { smnreg_0x11A00200, 1, 0 },
+ { smnreg_0x11A0020C, 1, 0 },
+ { smnreg_0x11A00210, 1, 0 },
+ { smnreg_0x11A00108, 1, 0 },
+};
+
+static ssize_t aqua_vanjaram_read_xgmi_state(struct amdgpu_device *adev,
+ void *buf, size_t max_size)
+{
+ struct amdgpu_reg_state_xgmi_v1_0 *xgmi_reg_state;
+ uint32_t start_addr, incrx, num_regs, szbuf;
+ struct amdgpu_regs_xgmi_v1_0 *xgmi_regs;
+ struct amdgpu_smn_reg_data *reg_data;
+ const int max_xgmi_instances = 8;
+ int inst = 0, i, j, r, n;
+ const int xgmi_inst = 2;
+ void *p;
+
+ if (!buf || !max_size)
+ return -EINVAL;
+
+ xgmi_reg_state = (struct amdgpu_reg_state_xgmi_v1_0 *)buf;
+
+ szbuf = sizeof(*xgmi_reg_state) +
+ amdgpu_reginst_size(max_xgmi_instances, sizeof(*xgmi_regs),
+ NUM_XGMI_SMN_REGS);
+	/* Buffer must be large enough for all XGMI instances */
+ if (max_size < szbuf)
+ return -EOVERFLOW;
+
+ p = &xgmi_reg_state->xgmi_state_regs[0];
+ for_each_inst(i, adev->aid_mask) {
+ for (j = 0; j < xgmi_inst; ++j) {
+ xgmi_regs = (struct amdgpu_regs_xgmi_v1_0 *)p;
+ xgmi_regs->inst_header.instance = inst++;
+
+ xgmi_regs->inst_header.state = AMDGPU_INST_S_OK;
+ xgmi_regs->inst_header.num_smn_regs = NUM_XGMI_SMN_REGS;
+
+ reg_data = xgmi_regs->smn_reg_values;
+
+ for (r = 0; r < ARRAY_SIZE(xgmi_reg_addrs); r++) {
+ start_addr = xgmi_reg_addrs[r].start_addr;
+ incrx = xgmi_reg_addrs[r].incrx;
+ num_regs = xgmi_reg_addrs[r].num_regs;
+
+ for (n = 0; n < num_regs; n++) {
+ aqua_read_smn_ext(
+ adev, reg_data,
+ XGMI_LINK_REG(start_addr, j) +
+ n * incrx,
+ i);
+ ++reg_data;
+ }
+ }
+ p = reg_data;
+ }
+ }
+
+ xgmi_reg_state->common_header.structure_size = szbuf;
+ xgmi_reg_state->common_header.format_revision = 1;
+ xgmi_reg_state->common_header.content_revision = 0;
+ xgmi_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_XGMI;
+ xgmi_reg_state->common_header.num_instances = max_xgmi_instances;
+
+ return xgmi_reg_state->common_header.structure_size;
+}
+
+#define smnreg_0x11C00070 0x11C00070
+#define smnreg_0x11C00210 0x11C00210
+
+static struct aqua_reg_list wafl_reg_addrs[] = {
+ { smnreg_0x11C00070, 4, DW_ADDR_INCR },
+ { smnreg_0x11C00210, 1, 0 },
+};
+
+#define WAFL_LINK_REG(smnreg, l) ((smnreg) | (l << 20))
+
+#define NUM_WAFL_SMN_REGS 5
+
+static ssize_t aqua_vanjaram_read_wafl_state(struct amdgpu_device *adev,
+ void *buf, size_t max_size)
+{
+ struct amdgpu_reg_state_wafl_v1_0 *wafl_reg_state;
+ uint32_t start_addr, incrx, num_regs, szbuf;
+ struct amdgpu_regs_wafl_v1_0 *wafl_regs;
+ struct amdgpu_smn_reg_data *reg_data;
+ const int max_wafl_instances = 8;
+ int inst = 0, i, j, r, n;
+ const int wafl_inst = 2;
+ void *p;
+
+ if (!buf || !max_size)
+ return -EINVAL;
+
+ wafl_reg_state = (struct amdgpu_reg_state_wafl_v1_0 *)buf;
+
+ szbuf = sizeof(*wafl_reg_state) +
+ amdgpu_reginst_size(max_wafl_instances, sizeof(*wafl_regs),
+ NUM_WAFL_SMN_REGS);
+
+ if (max_size < szbuf)
+ return -EOVERFLOW;
+
+ p = &wafl_reg_state->wafl_state_regs[0];
+ for_each_inst(i, adev->aid_mask) {
+ for (j = 0; j < wafl_inst; ++j) {
+ wafl_regs = (struct amdgpu_regs_wafl_v1_0 *)p;
+ wafl_regs->inst_header.instance = inst++;
+
+ wafl_regs->inst_header.state = AMDGPU_INST_S_OK;
+ wafl_regs->inst_header.num_smn_regs = NUM_WAFL_SMN_REGS;
+
+ reg_data = wafl_regs->smn_reg_values;
+
+ for (r = 0; r < ARRAY_SIZE(wafl_reg_addrs); r++) {
+ start_addr = wafl_reg_addrs[r].start_addr;
+ incrx = wafl_reg_addrs[r].incrx;
+ num_regs = wafl_reg_addrs[r].num_regs;
+ for (n = 0; n < num_regs; n++) {
+ aqua_read_smn_ext(
+ adev, reg_data,
+ WAFL_LINK_REG(start_addr, j) +
+ n * incrx,
+ i);
+ ++reg_data;
+ }
+ }
+ p = reg_data;
+ }
+ }
+
+ wafl_reg_state->common_header.structure_size = szbuf;
+ wafl_reg_state->common_header.format_revision = 1;
+ wafl_reg_state->common_header.content_revision = 0;
+ wafl_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_WAFL;
+ wafl_reg_state->common_header.num_instances = max_wafl_instances;
+
+ return wafl_reg_state->common_header.structure_size;
+}
+
+#define smnreg_0x1B311060 0x1B311060
+#define smnreg_0x1B411060 0x1B411060
+#define smnreg_0x1B511060 0x1B511060
+#define smnreg_0x1B611060 0x1B611060
+
+#define smnreg_0x1C307120 0x1C307120
+#define smnreg_0x1C317120 0x1C317120
+
+#define smnreg_0x1C320830 0x1C320830
+#define smnreg_0x1C380830 0x1C380830
+#define smnreg_0x1C3D0830 0x1C3D0830
+#define smnreg_0x1C420830 0x1C420830
+
+#define smnreg_0x1C320100 0x1C320100
+#define smnreg_0x1C380100 0x1C380100
+#define smnreg_0x1C3D0100 0x1C3D0100
+#define smnreg_0x1C420100 0x1C420100
+
+#define smnreg_0x1B310500 0x1B310500
+#define smnreg_0x1C300400 0x1C300400
+
+#define USR_CAKE_INCR 0x11000
+#define USR_LINK_INCR 0x100000
+#define USR_CP_INCR 0x10000
+
+#define NUM_USR_SMN_REGS 20
+
+struct aqua_reg_list usr_reg_addrs[] = {
+ { smnreg_0x1B311060, 4, DW_ADDR_INCR },
+ { smnreg_0x1B411060, 4, DW_ADDR_INCR },
+ { smnreg_0x1B511060, 4, DW_ADDR_INCR },
+ { smnreg_0x1B611060, 4, DW_ADDR_INCR },
+ { smnreg_0x1C307120, 2, DW_ADDR_INCR },
+ { smnreg_0x1C317120, 2, DW_ADDR_INCR },
+};
+
+#define NUM_USR1_SMN_REGS 46
+struct aqua_reg_list usr1_reg_addrs[] = {
+ { smnreg_0x1C320830, 6, USR_CAKE_INCR },
+ { smnreg_0x1C380830, 5, USR_CAKE_INCR },
+ { smnreg_0x1C3D0830, 5, USR_CAKE_INCR },
+ { smnreg_0x1C420830, 4, USR_CAKE_INCR },
+ { smnreg_0x1C320100, 6, USR_CAKE_INCR },
+ { smnreg_0x1C380100, 5, USR_CAKE_INCR },
+ { smnreg_0x1C3D0100, 5, USR_CAKE_INCR },
+ { smnreg_0x1C420100, 4, USR_CAKE_INCR },
+ { smnreg_0x1B310500, 4, USR_LINK_INCR },
+ { smnreg_0x1C300400, 2, USR_CP_INCR },
+};
+
+static ssize_t aqua_vanjaram_read_usr_state(struct amdgpu_device *adev,
+ void *buf, size_t max_size,
+ int reg_state)
+{
+ uint32_t start_addr, incrx, num_regs, szbuf, num_smn;
+ struct amdgpu_reg_state_usr_v1_0 *usr_reg_state;
+ struct amdgpu_regs_usr_v1_0 *usr_regs;
+ struct amdgpu_smn_reg_data *reg_data;
+ const int max_usr_instances = 4;
+ struct aqua_reg_list *reg_addrs;
+ int inst = 0, i, n, r, arr_size;
+ void *p;
+
+ if (!buf || !max_size)
+ return -EINVAL;
+
+ switch (reg_state) {
+ case AMDGPU_REG_STATE_TYPE_USR:
+ arr_size = ARRAY_SIZE(usr_reg_addrs);
+ reg_addrs = usr_reg_addrs;
+ num_smn = NUM_USR_SMN_REGS;
+ break;
+ case AMDGPU_REG_STATE_TYPE_USR_1:
+ arr_size = ARRAY_SIZE(usr1_reg_addrs);
+ reg_addrs = usr1_reg_addrs;
+ num_smn = NUM_USR1_SMN_REGS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ usr_reg_state = (struct amdgpu_reg_state_usr_v1_0 *)buf;
+
+ szbuf = sizeof(*usr_reg_state) + amdgpu_reginst_size(max_usr_instances,
+ sizeof(*usr_regs),
+ num_smn);
+ if (max_size < szbuf)
+ return -EOVERFLOW;
+
+ p = &usr_reg_state->usr_state_regs[0];
+ for_each_inst(i, adev->aid_mask) {
+ usr_regs = (struct amdgpu_regs_usr_v1_0 *)p;
+ usr_regs->inst_header.instance = inst++;
+ usr_regs->inst_header.state = AMDGPU_INST_S_OK;
+ usr_regs->inst_header.num_smn_regs = num_smn;
+ reg_data = usr_regs->smn_reg_values;
+
+ for (r = 0; r < arr_size; r++) {
+ start_addr = reg_addrs[r].start_addr;
+ incrx = reg_addrs[r].incrx;
+ num_regs = reg_addrs[r].num_regs;
+ for (n = 0; n < num_regs; n++) {
+ aqua_read_smn_ext(adev, reg_data,
+ start_addr + n * incrx, i);
+ reg_data++;
+ }
+ }
+ p = reg_data;
+ }
+
+ usr_reg_state->common_header.structure_size = szbuf;
+ usr_reg_state->common_header.format_revision = 1;
+ usr_reg_state->common_header.content_revision = 0;
+ usr_reg_state->common_header.state_type = AMDGPU_REG_STATE_TYPE_USR;
+ usr_reg_state->common_header.num_instances = max_usr_instances;
+
+ return usr_reg_state->common_header.structure_size;
+}
+
+ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev,
+ enum amdgpu_reg_state reg_state, void *buf,
+ size_t max_size)
+{
+ ssize_t size;
+
+ switch (reg_state) {
+ case AMDGPU_REG_STATE_TYPE_PCIE:
+ size = aqua_vanjaram_read_pcie_state(adev, buf, max_size);
+ break;
+ case AMDGPU_REG_STATE_TYPE_XGMI:
+ size = aqua_vanjaram_read_xgmi_state(adev, buf, max_size);
+ break;
+ case AMDGPU_REG_STATE_TYPE_WAFL:
+ size = aqua_vanjaram_read_wafl_state(adev, buf, max_size);
+ break;
+ case AMDGPU_REG_STATE_TYPE_USR:
+ size = aqua_vanjaram_read_usr_state(adev, buf, max_size,
+ AMDGPU_REG_STATE_TYPE_USR);
+ break;
+ case AMDGPU_REG_STATE_TYPE_USR_1:
+ size = aqua_vanjaram_read_usr_state(
+ adev, buf, max_size, AMDGPU_REG_STATE_TYPE_USR_1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return size;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
new file mode 100644
index 000000000000..fda99c958c3b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "arct_ip_offset.h"
+
+int arct_reg_base_init(struct amdgpu_device *adev)
+{
+	/* HW has more IP blocks; only initialize the blocks needed by our driver */
+ uint32_t i;
+ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+ adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
+ adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(SDMA2_BASE.instance[i]));
+ adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(SDMA3_BASE.instance[i]));
+ adev->reg_offset[SDMA4_HWIP][i] = (uint32_t *)(&(SDMA4_BASE.instance[i]));
+ adev->reg_offset[SDMA5_HWIP][i] = (uint32_t *)(&(SDMA5_BASE.instance[i]));
+ adev->reg_offset[SDMA6_HWIP][i] = (uint32_t *)(&(SDMA6_BASE.instance[i]));
+ adev->reg_offset[SDMA7_HWIP][i] = (uint32_t *)(&(SDMA7_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i]));
+ adev->reg_offset[RSMU_HWIP][i] = (uint32_t *)(&(RSMU_BASE.instance[i]));
+ }
+ return 0;
+}
+
+
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
new file mode 100644
index 000000000000..42f4e163e251
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "athub_v1_0.h"
+
+#include "athub/athub_1_0_offset.h"
+#include "athub/athub_1_0_sh_mask.h"
+#include "vega10_enum.h"
+
+#include "soc15_common.h"
+
+static void athub_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
+ data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
+}
+
+static void athub_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
+ (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+ data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+
+	if (def != data)
+ WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
+}
+
+int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 0):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(1, 5, 0):
+ athub_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ athub_update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void athub_v1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
+{
+ int data;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ /* AMD_CG_SUPPORT_ATHUB_MGCG */
+ data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
+ if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
+
+ /* AMD_CG_SUPPORT_ATHUB_LS */
+ if (data & ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_LS;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.h
new file mode 100644
index 000000000000..6be0a6704ea7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __ATHUB_V1_0_H__
+#define __ATHUB_V1_0_H__
+
+int athub_v1_0_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+void athub_v1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
new file mode 100644
index 000000000000..5a122f50a6e7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "athub_v2_0.h"
+
+#include "athub/athub_2_0_0_offset.h"
+#include "athub/athub_2_0_0_sh_mask.h"
+#include "athub/athub_2_0_0_default.h"
+
+#include "soc15_common.h"
+
+static void
+athub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
+ return;
+
+ def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
+
+ if (enable)
+ data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
+}
+
+static void
+athub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ if (!((adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
+ (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)))
+ return;
+
+ def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
+
+ if (enable)
+ data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
+}
+
+int athub_v2_0_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
+ case IP_VERSION(1, 3, 1):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 2):
+ athub_v2_0_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ athub_v2_0_update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void athub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
+{
+ int data;
+
+ /* AMD_CG_SUPPORT_ATHUB_MGCG */
+ data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
+ if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
+
+ /* AMD_CG_SUPPORT_ATHUB_LS */
+ if (data & ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_LS;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.h
new file mode 100644
index 000000000000..8b763f6dfd81
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __ATHUB_V2_0_H__
+#define __ATHUB_V2_0_H__
+
+int athub_v2_0_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+void athub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
new file mode 100644
index 000000000000..e143fcc46148
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "athub_v2_1.h"
+
+#include "athub/athub_2_1_0_offset.h"
+#include "athub/athub_2_1_0_sh_mask.h"
+
+#include "soc15_common.h"
+
+static void
+athub_v2_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
+ data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
+}
+
+static void
+athub_v2_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS) &&
+ (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
+ data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+
+	if (def != data)
+ WREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL, data);
+}
+
+int athub_v2_1_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 1, 2):
+ case IP_VERSION(2, 4, 0):
+ athub_v2_1_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE);
+ athub_v2_1_update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void athub_v2_1_get_clockgating(struct amdgpu_device *adev, u64 *flags)
+{
+ int data;
+
+ /* AMD_CG_SUPPORT_ATHUB_MGCG */
+ data = RREG32_SOC15(ATHUB, 0, mmATHUB_MISC_CNTL);
+ if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
+
+ /* AMD_CG_SUPPORT_ATHUB_LS */
+ if (data & ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_LS;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.h b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.h
new file mode 100644
index 000000000000..b799f14bce03
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __ATHUB_V2_1_H__
+#define __ATHUB_V2_1_H__
+
+int athub_v2_1_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+void athub_v2_1_get_clockgating(struct amdgpu_device *adev, u64 *flags);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
new file mode 100644
index 000000000000..d1bba9c64e16
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "athub_v3_0.h"
+#include "athub/athub_3_0_0_offset.h"
+#include "athub/athub_3_0_0_sh_mask.h"
+#include "navi10_enum.h"
+#include "soc15_common.h"
+
+#define regATHUB_MISC_CNTL_V3_0_1 0x00d7
+#define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX 0
+#define regATHUB_MISC_CNTL_V3_3_0 0x00d8
+#define regATHUB_MISC_CNTL_V3_3_0_BASE_IDX 0
+
+
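+/* ATHUB_MISC_CNTL sits at a different offset on some 3.x variants, so the
+ * CG control register is selected per IP version.
+ */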
+static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
+ case IP_VERSION(3, 0, 1):
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
+ break;
+ case IP_VERSION(3, 3, 0):
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0);
+ break;
+ default:
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ break;
+ }
+ return data;
+}
+
+static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
+{
+ switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
+ case IP_VERSION(3, 0, 1):
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
+ break;
+ case IP_VERSION(3, 3, 0):
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_3_0, data);
+ break;
+ default:
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ break;
+ }
+}
+
+static void
+athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = athub_v3_0_get_cg_cntl(adev);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG))
+ data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+
+ if (def != data)
+ athub_v3_0_set_cg_cntl(adev, data);
+}
+
+static void
+athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = athub_v3_0_get_cg_cntl(adev);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS))
+ data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+
+ if (def != data)
+ athub_v3_0_set_cg_cntl(adev, data);
+}
+
+int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 3, 0):
+ athub_v3_0_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ athub_v3_0_update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void athub_v3_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
+{
+ int data;
+
+ /* AMD_CG_SUPPORT_ATHUB_MGCG */
+ data = athub_v3_0_get_cg_cntl(adev);
+ if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
+
+ /* AMD_CG_SUPPORT_ATHUB_LS */
+ if (data & ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_LS;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v3_0.h b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.h
new file mode 100644
index 000000000000..e08a7d564365
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v3_0.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __ATHUB_V3_0_H__
+#define __ATHUB_V3_0_H__
+
+int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+void athub_v3_0_get_clockgating(struct amdgpu_device *adev, u64 *flags);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.c
new file mode 100644
index 000000000000..8a0773b80864
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "athub_v4_1_0.h"
+#include "athub/athub_4_1_0_offset.h"
+#include "athub/athub_4_1_0_sh_mask.h"
+#include "soc15_common.h"
+
+static uint32_t athub_v4_1_0_get_cg_cntl(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
+ case IP_VERSION(4, 1, 0):
+ data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
+ break;
+ default:
+ data = 0;
+ break;
+ }
+ return data;
+}
+
+static void athub_v4_1_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
+{
+ switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
+ case IP_VERSION(4, 1, 0):
+ WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+athub_v4_1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = athub_v4_1_0_get_cg_cntl(adev);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG))
+ data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;
+
+ if (def != data)
+ athub_v4_1_0_set_cg_cntl(adev, data);
+}
+
+static void
+athub_v4_1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = athub_v4_1_0_get_cg_cntl(adev);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS))
+ data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+ else
+ data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
+
+ if (def != data)
+ athub_v4_1_0_set_cg_cntl(adev, data);
+}
+
+int athub_v4_1_0_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
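+/* Per-link WAFL register apertures are likewise 0x100000 apart (link index in bits 20+) */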
+
+ switch (amdgpu_ip_version(adev, ATHUB_HWIP, 0)) {
+ case IP_VERSION(4, 1, 0):
+ athub_v4_1_0_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ athub_v4_1_0_update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void athub_v4_1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
+{
+ int data;
+
+ /* AMD_CG_SUPPORT_ATHUB_MGCG */
+ data = athub_v4_1_0_get_cg_cntl(adev);
+ if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
+
+ /* AMD_CG_SUPPORT_ATHUB_LS */
+ if (data & ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_ATHUB_LS;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.h b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.h
new file mode 100644
index 000000000000..4d18d0998fa8
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v4_1_0.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __ATHUB_V4_1_0_H__
+#define __ATHUB_V4_1_0_H__
+
+int athub_v4_1_0_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state);
+void athub_v4_1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index d69aa2e179bb..7a063e44d429 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -25,10 +25,15 @@
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
-#include <asm/unaligned.h>
+#include <linux/string_helpers.h>
+
+#include <linux/unaligned.h>
+
+#include <drm/drm_util.h>
#define ATOM_DEBUG
+#include "atomfirmware.h"
#include "atom.h"
#include "atom-names.h"
#include "atom-bits.h"
@@ -52,9 +57,12 @@
#define PLL_INDEX 2
#define PLL_DATA 3
+#define ATOM_CMD_TIMEOUT_SEC 20
+
typedef struct {
struct atom_context *ctx;
uint32_t *ps, *ws;
+ int ps_size, ws_size;
int ps_shift;
uint16_t start;
unsigned last_jump;
@@ -62,13 +70,13 @@ typedef struct {
bool abort;
} atom_exec_context;
-int amdgpu_atom_debug = 0;
-static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
-int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
+int amdgpu_atom_debug;
+static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size);
+int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size);
static uint32_t atom_arg_mask[8] =
- { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
-0xFF000000 };
+ { 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
+ 0xFF000000 };
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 };
static int atom_dst_to_src[8][4] = {
@@ -84,7 +92,7 @@ static int atom_dst_to_src[8][4] = {
};
static int atom_def_dst[8] = { 0, 0, 1, 2, 0, 1, 2, 3 };
-static int debug_depth = 0;
+static int debug_depth;
#ifdef ATOM_DEBUG
static void debug_print_spaces(int n)
{
@@ -110,11 +118,11 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
base++;
break;
case ATOM_IIO_READ:
- temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+ temp = ctx->card->reg_read(ctx->card, CU16(base + 1));
base += 3;
break;
case ATOM_IIO_WRITE:
- ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
+ ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
case ATOM_IIO_CLEAR:
@@ -216,7 +224,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
(*ptr)++;
/* get_unaligned_le32 avoids unaligned accesses from atombios
* tables, noticed on a DEC Alpha. */
- val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+ if (idx < ctx->ps_size)
+ val = get_unaligned_le32((u32 *)&ctx->ps[idx]);
+ else
+ pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
if (print)
DEBUG("PS[0x%02X,0x%04X]", idx, val);
break;
@@ -254,7 +265,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
val = gctx->reg_block;
break;
default:
- val = ctx->ws[idx];
+ if (idx < ctx->ws_size)
+ val = ctx->ws[idx];
+ else
+ pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
}
break;
case ATOM_ARG_ID:
@@ -287,7 +301,7 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
(*ptr) += 4;
if (print)
DEBUG("IMM 0x%08X\n", val);
- return val;
+ break;
case ATOM_SRC_WORD0:
case ATOM_SRC_WORD8:
case ATOM_SRC_WORD16:
@@ -295,7 +309,7 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
(*ptr) += 2;
if (print)
DEBUG("IMM 0x%04X\n", val);
- return val;
+ break;
case ATOM_SRC_BYTE0:
case ATOM_SRC_BYTE8:
case ATOM_SRC_BYTE16:
@@ -304,9 +318,9 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
(*ptr)++;
if (print)
DEBUG("IMM 0x%02X\n", val);
- return val;
+ break;
}
- return 0;
+ return val;
case ATOM_ARG_PLL:
idx = U8(*ptr);
(*ptr)++;
@@ -388,7 +402,6 @@ static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr)
(*ptr)++;
return;
}
- return;
}
}
@@ -489,6 +502,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
idx = U8(*ptr);
(*ptr)++;
DEBUG("PS[0x%02X]", idx);
+ if (idx >= ctx->ps_size) {
+ pr_info("PS index out of range: %i > %i\n", idx, ctx->ps_size);
+ return;
+ }
ctx->ps[idx] = cpu_to_le32(val);
break;
case ATOM_ARG_WS:
@@ -521,6 +538,10 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
gctx->reg_block = val;
break;
default:
+ if (idx >= ctx->ws_size) {
+ pr_info("WS index out of range: %i > %i\n", idx, ctx->ws_size);
+ return;
+ }
ctx->ws[idx] = val;
}
break;
@@ -618,7 +639,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
else
SDEBUG(" table: %d\n", idx);
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
- r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+ r = amdgpu_atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift, ctx->ps_size - ctx->ps_shift);
if (r) {
ctx->abort = true;
}
@@ -735,15 +756,16 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
break;
}
if (arg != ATOM_COND_ALWAYS)
- SDEBUG(" taken: %s\n", execute ? "yes" : "no");
+ SDEBUG(" taken: %s\n", str_yes_no(execute));
SDEBUG(" target: 0x%04X\n", target);
if (execute) {
if (ctx->last_jump == (ctx->start + target)) {
cjiffies = jiffies;
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
cjiffies -= ctx->last_jump_jiffies;
- if ((jiffies_to_msecs(cjiffies) > 5000)) {
- DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
+ if ((jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC * 1000)) {
+ DRM_ERROR("atombios stuck in loop for more than %dsecs aborting\n",
+ ATOM_CMD_TIMEOUT_SEC);
ctx->abort = true;
}
} else {
@@ -1196,7 +1218,7 @@ static struct {
atom_op_div32, ATOM_ARG_WS},
};
-static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
+static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t *params, int params_size)
{
int base = CU16(ctx->cmd_table + 4 + 2 * index);
int len, ws, ps, ptr;
@@ -1218,12 +1240,21 @@ static int amdgpu_atom_execute_table_locked(struct atom_context *ctx, int index,
ectx.ps_shift = ps / 4;
ectx.start = base;
ectx.ps = params;
+ ectx.ps_size = params_size;
ectx.abort = false;
ectx.last_jump = 0;
- if (ws)
- ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
- else
+ ectx.last_jump_jiffies = 0;
+ if (ws) {
+ ectx.ws = kcalloc(4, ws, GFP_KERNEL);
+ if (!ectx.ws) {
+ ret = -ENOMEM;
+ goto free;
+ }
+ ectx.ws_size = ws;
+ } else {
ectx.ws = NULL;
+ ectx.ws_size = 0;
+ }
debug_depth++;
while (1) {
@@ -1257,7 +1288,7 @@ free:
return ret;
}
-int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size)
{
int r;
@@ -1273,7 +1304,7 @@ int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t * pa
/* reset divmul */
ctx->divmul[0] = 0;
ctx->divmul[1] = 0;
- r = amdgpu_atom_execute_table_locked(ctx, index, params);
+ r = amdgpu_atom_execute_table_locked(ctx, index, params, params_size);
mutex_unlock(&ctx->mutex);
return r;
}
@@ -1294,13 +1325,209 @@ static void atom_index_iio(struct atom_context *ctx, int base)
}
}
+static void atom_get_vbios_name(struct atom_context *ctx)
+{
+ unsigned char *p_rom;
+ unsigned char str_num;
+ unsigned short off_to_vbios_str;
+ unsigned char *c_ptr;
+ int name_size;
+ int i;
+
+ const char *na = "--N/A--";
+ char *back;
+
+ p_rom = ctx->bios;
+
+ str_num = *(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS);
+ if (str_num != 0) {
+ off_to_vbios_str =
+ *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
+
+ c_ptr = (unsigned char *)(p_rom + off_to_vbios_str);
+ } else {
+ /* do not know where to find name */
+ memcpy(ctx->name, na, 7);
+ ctx->name[7] = 0;
+ return;
+ }
+
+ /*
+ * skip the atombios strings, usually 4
+ * 1st is P/N, 2nd is ASIC, 3rd is PCI type, 4th is Memory type
+ */
+ for (i = 0; i < str_num; i++) {
+ while (*c_ptr != 0)
+ c_ptr++;
+ c_ptr++;
+ }
+
+ /* skip the following 2 chars: 0x0D 0x0A */
+ c_ptr += 2;
+
+ name_size = strnlen(c_ptr, STRLEN_LONG - 1);
+ memcpy(ctx->name, c_ptr, name_size);
+ back = ctx->name + name_size;
+ while ((*--back) == ' ')
+ ;
+ *(back + 1) = '\0';
+}
+
+static void atom_get_vbios_date(struct atom_context *ctx)
+{
+ unsigned char *p_rom;
+ unsigned char *date_in_rom;
+
+ p_rom = ctx->bios;
+
+ date_in_rom = p_rom + OFFSET_TO_VBIOS_DATE;
+
+ ctx->date[0] = '2';
+ ctx->date[1] = '0';
+ ctx->date[2] = date_in_rom[6];
+ ctx->date[3] = date_in_rom[7];
+ ctx->date[4] = '/';
+ ctx->date[5] = date_in_rom[0];
+ ctx->date[6] = date_in_rom[1];
+ ctx->date[7] = '/';
+ ctx->date[8] = date_in_rom[3];
+ ctx->date[9] = date_in_rom[4];
+ ctx->date[10] = ' ';
+ ctx->date[11] = date_in_rom[9];
+ ctx->date[12] = date_in_rom[10];
+ ctx->date[13] = date_in_rom[11];
+ ctx->date[14] = date_in_rom[12];
+ ctx->date[15] = date_in_rom[13];
+ ctx->date[16] = '\0';
+}
+
+static unsigned char *atom_find_str_in_rom(struct atom_context *ctx, char *str, int start,
+ int end, int maxlen)
+{
+ unsigned long str_off;
+ unsigned char *p_rom;
+ unsigned short str_len;
+
+ str_off = 0;
+ str_len = strnlen(str, maxlen);
+ p_rom = ctx->bios;
+
+ for (; start <= end; ++start) {
+ for (str_off = 0; str_off < str_len; ++str_off) {
+ if (str[str_off] != *(p_rom + start + str_off))
+ break;
+ }
+
+ if (str_off == str_len || str[str_off] == 0)
+ return p_rom + start;
+ }
+ return NULL;
+}
+
+static void atom_get_vbios_pn(struct atom_context *ctx)
+{
+ unsigned char *p_rom;
+ unsigned short off_to_vbios_str;
+ unsigned char *vbios_str;
+ int count;
+
+ off_to_vbios_str = 0;
+ p_rom = ctx->bios;
+
+ if (*(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS) != 0) {
+ off_to_vbios_str =
+ *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
+
+ vbios_str = (unsigned char *)(p_rom + off_to_vbios_str);
+ } else {
+ vbios_str = p_rom + OFFSET_TO_VBIOS_PART_NUMBER;
+ }
+
+ if (*vbios_str == 0) {
+ vbios_str = atom_find_str_in_rom(ctx, BIOS_ATOM_PREFIX, 3, 1024, 64);
+ if (vbios_str == NULL)
+ vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1;
+ }
+ OPTIMIZER_HIDE_VAR(vbios_str);
+ if (vbios_str != NULL && *vbios_str == 0)
+ vbios_str++;
+
+ if (vbios_str != NULL) {
+ count = 0;
+ while ((count < BIOS_STRING_LENGTH) && vbios_str[count] >= ' ' &&
+ vbios_str[count] <= 'z') {
+ ctx->vbios_pn[count] = vbios_str[count];
+ count++;
+ }
+
+ ctx->vbios_pn[count] = 0;
+ }
+
+ pr_info("ATOM BIOS: %s\n", ctx->vbios_pn);
+}
+
+static void atom_get_vbios_version(struct atom_context *ctx)
+{
+ unsigned short start = 3, end;
+ unsigned char *vbios_ver;
+ unsigned char *p_rom;
+
+ p_rom = ctx->bios;
+ /* Search from strings offset if it's present */
+ start = *(unsigned short *)(p_rom +
+ OFFSET_TO_GET_ATOMBIOS_STRING_START);
+
+ /* Search till atom rom header start point */
+ end = *(unsigned short *)(p_rom + OFFSET_TO_ATOM_ROM_HEADER_POINTER);
+
+ /* Use hardcoded offsets, if the offsets are not populated */
+ if (end <= start) {
+ start = 3;
+ end = 1024;
+ }
+
+ /* find anchor ATOMBIOSBK-AMD */
+ vbios_ver =
+ atom_find_str_in_rom(ctx, BIOS_VERSION_PREFIX, start, end, 64);
+ if (vbios_ver != NULL) {
+ /* skip ATOMBIOSBK-AMD VER */
+ vbios_ver += 18;
+ memcpy(ctx->vbios_ver_str, vbios_ver, STRLEN_NORMAL);
+ } else {
+ ctx->vbios_ver_str[0] = '\0';
+ }
+}
+
+static void atom_get_vbios_build(struct atom_context *ctx)
+{
+ unsigned char *atom_rom_hdr;
+ unsigned char *str;
+ uint16_t base, len;
+
+ base = CU16(ATOM_ROM_TABLE_PTR);
+ atom_rom_hdr = CSTR(base);
+
+ str = CSTR(CU16(base + ATOM_ROM_CFG_PTR));
+ /* Skip config string */
+ while (str < atom_rom_hdr && *str++)
+ ;
+ /* Skip change list string */
+ while (str < atom_rom_hdr && *str++)
+ ;
+
+ len = min(atom_rom_hdr - str, STRLEN_NORMAL);
+ if (len)
+ strscpy(ctx->build_num, str, len);
+}
+
struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
{
int base;
struct atom_context *ctx =
kzalloc(sizeof(struct atom_context), GFP_KERNEL);
- char *str;
- u16 idx;
+ struct _ATOM_ROM_HEADER *atom_rom_header;
+ struct _ATOM_MASTER_DATA_TABLE *master_table;
+ struct _ATOM_FIRMWARE_INFO *atom_fw_info;
if (!ctx)
return NULL;
@@ -1338,13 +1565,22 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
return NULL;
}
- idx = CU16(ATOM_ROM_PART_NUMBER_PTR);
- if (idx == 0)
- idx = 0x80;
+ atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base);
+ if (atom_rom_header->usMasterDataTableOffset != 0) {
+ master_table = (struct _ATOM_MASTER_DATA_TABLE *)
+ CSTR(atom_rom_header->usMasterDataTableOffset);
+ if (master_table->ListOfDataTables.FirmwareInfo != 0) {
+ atom_fw_info = (struct _ATOM_FIRMWARE_INFO *)
+ CSTR(master_table->ListOfDataTables.FirmwareInfo);
+ ctx->version = atom_fw_info->ulFirmwareRevision;
+ }
+ }
- str = CSTR(idx);
- if (*str != '\0')
- pr_info("ATOM BIOS: %s\n", str);
+ atom_get_vbios_name(ctx);
+ atom_get_vbios_pn(ctx);
+ atom_get_vbios_date(ctx);
+ atom_get_vbios_version(ctx);
+ atom_get_vbios_build(ctx);
return ctx;
}
@@ -1364,7 +1600,7 @@ int amdgpu_atom_asic_init(struct atom_context *ctx)
if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
return 1;
- ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps);
+ ret = amdgpu_atom_execute_table(ctx, ATOM_CMD_INIT, ps, 16);
if (ret)
return ret;
@@ -1380,8 +1616,8 @@ void amdgpu_atom_destroy(struct atom_context *ctx)
}
bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
- uint16_t * size, uint8_t * frev, uint8_t * crev,
- uint16_t * data_start)
+ uint16_t *size, uint8_t *frev, uint8_t *crev,
+ uint16_t *data_start)
{
int offset = index * 2 + 4;
int idx = CU16(ctx->data_table + offset);
@@ -1400,8 +1636,8 @@ bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index,
return true;
}
-bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
- uint8_t * crev)
+bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev,
+ uint8_t *crev)
{
int offset = index * 2 + 4;
int idx = CU16(ctx->cmd_table + offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index ddd8045accf3..825ff28731f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -26,17 +26,18 @@
#define ATOM_H
#include <linux/types.h>
-#include <drm/drmP.h>
+
+struct drm_device;
#define ATOM_BIOS_MAGIC 0xAA55
#define ATOM_ATI_MAGIC_PTR 0x30
#define ATOM_ATI_MAGIC " 761295520"
#define ATOM_ROM_TABLE_PTR 0x48
-#define ATOM_ROM_PART_NUMBER_PTR 0x6E
#define ATOM_ROM_MAGIC "ATOM"
#define ATOM_ROM_MAGIC_PTR 4
+#define ATOM_ROM_CFG_PTR 0xC
#define ATOM_ROM_MSG_PTR 0x10
#define ATOM_ROM_CMD_PTR 0x1E
#define ATOM_ROM_DATA_PTR 0x20
@@ -111,16 +112,21 @@
#define ATOM_IO_SYSIO 2
#define ATOM_IO_IIO 0x80
+#define STRLEN_NORMAL 32
+#define STRLEN_LONG 64
+#define STRLEN_VERYLONG 254
+
struct card_info {
struct drm_device *dev;
- void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
- uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */
- void (* ioreg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
- uint32_t (* ioreg_read)(struct card_info *, uint32_t); /* filled by driver */
- void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
- uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */
- void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
- uint32_t (* pll_read)(struct card_info *, uint32_t); /* filled by driver */
+ void (*reg_write)(struct card_info *info,
+ u32 reg, uint32_t val); /* filled by driver */
+ uint32_t (*reg_read)(struct card_info *info, uint32_t reg); /* filled by driver */
+ void (*mc_write)(struct card_info *info,
+ u32 reg, uint32_t val); /* filled by driver */
+ uint32_t (*mc_read)(struct card_info *info, uint32_t reg); /* filled by driver */
+ void (*pll_write)(struct card_info *info,
+ u32 reg, uint32_t val); /* filled by driver */
+ uint32_t (*pll_read)(struct card_info *info, uint32_t reg); /* filled by driver */
};
struct atom_context {
@@ -140,14 +146,21 @@ struct atom_context {
int io_mode;
uint32_t *scratch;
int scratch_size_bytes;
+
+ uint8_t name[STRLEN_LONG];
+ uint8_t vbios_pn[STRLEN_LONG];
+ uint32_t version;
+ uint8_t vbios_ver_str[STRLEN_NORMAL];
+ uint8_t date[STRLEN_NORMAL];
+ uint8_t build_num[STRLEN_NORMAL];
};
extern int amdgpu_atom_debug;
-struct atom_context *amdgpu_atom_parse(struct card_info *, void *);
-int amdgpu_atom_execute_table(struct atom_context *, int, uint32_t *);
-int amdgpu_atom_asic_init(struct atom_context *);
-void amdgpu_atom_destroy(struct atom_context *);
+struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios);
+int amdgpu_atom_execute_table(struct atom_context *ctx, int index, uint32_t *params, int params_size);
+int amdgpu_atom_asic_init(struct atom_context *ctx);
+void amdgpu_atom_destroy(struct atom_context *ctx);
bool amdgpu_atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
uint8_t *frev, uint8_t *crev, uint16_t *data_start);
bool amdgpu_atom_parse_cmd_header(struct atom_context *ctx, int index,
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 8c9bc75a9c2d..3dfc28840a7d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -23,8 +23,7 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+
#include <drm/amdgpu_drm.h>
#include <drm/drm_fixed.h>
#include "amdgpu.h"
@@ -41,7 +40,7 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
SET_CRTC_OVERSCAN_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan);
@@ -78,13 +77,13 @@ void amdgpu_atombios_crtc_overscan_setup(struct drm_crtc *crtc,
args.usOverscanTop = cpu_to_le16(amdgpu_crtc->v_border);
break;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
ENABLE_SCALER_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
@@ -107,14 +106,14 @@ void amdgpu_atombios_crtc_scaler_setup(struct drm_crtc *crtc)
args.ucEnable = ATOM_SCALER_DISABLE;
break;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int index =
GetIndexIntoMasterTable(COMMAND, UpdateCRTC_DoubleBufferRegisters);
ENABLE_CRTC_PS_ALLOCATION args;
@@ -124,14 +123,14 @@ void amdgpu_atombios_crtc_lock(struct drm_crtc *crtc, int lock)
args.ucCRTC = amdgpu_crtc->crtc_id;
args.ucEnable = lock;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int index = GetIndexIntoMasterTable(COMMAND, EnableCRTC);
ENABLE_CRTC_PS_ALLOCATION args;
@@ -140,14 +139,14 @@ void amdgpu_atombios_crtc_enable(struct drm_crtc *crtc, int state)
args.ucCRTC = amdgpu_crtc->crtc_id;
args.ucEnable = state;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int index = GetIndexIntoMasterTable(COMMAND, BlankCRTC);
BLANK_CRTC_PS_ALLOCATION args;
@@ -156,35 +155,35 @@ void amdgpu_atombios_crtc_blank(struct drm_crtc *crtc, int state)
args.ucCRTC = amdgpu_crtc->crtc_id;
args.ucBlanking = state;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_powergate(struct drm_crtc *crtc, int state)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
args.ucDispPipeId = amdgpu_crtc->crtc_id;
args.ucEnable = state;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
{
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
args.ucEnable = ATOM_INIT;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
@@ -192,7 +191,7 @@ void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
SET_CRTC_USING_DTD_TIMING_PARAMETERS args;
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_UsingDTDTiming);
u16 misc = 0;
@@ -229,7 +228,7 @@ void amdgpu_atombios_crtc_set_dtd_timing(struct drm_crtc *crtc,
args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = amdgpu_crtc->crtc_id;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
union atom_enable_ss {
@@ -294,7 +293,7 @@ static void amdgpu_atombios_crtc_program_ss(struct amdgpu_device *adev,
args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v3.ucEnable = enable;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
union adjust_pixel_clock {
@@ -307,7 +306,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_encoder *encoder = amdgpu_crtc->encoder;
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -396,7 +395,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
ADJUST_DISPLAY_CONFIG_SS_ENABLE;
amdgpu_atom_execute_table(adev->mode_info.atom_context,
- index, (uint32_t *)&args);
+ index, (uint32_t *)&args, sizeof(args));
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
break;
case 3:
@@ -429,7 +428,7 @@ static u32 amdgpu_atombios_crtc_adjust_pll(struct drm_crtc *crtc,
args.v3.sInput.ucExtTransmitterID = 0;
amdgpu_atom_execute_table(adev->mode_info.atom_context,
- index, (uint32_t *)&args);
+ index, (uint32_t *)&args, sizeof(args));
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
if (args.v3.sOutput.ucRefDiv) {
amdgpu_crtc->pll_flags |= AMDGPU_PLL_USE_FRAC_FB_DIV;
@@ -515,7 +514,7 @@ void amdgpu_atombios_crtc_set_disp_eng_pll(struct amdgpu_device *adev,
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
union set_dce_clock {
@@ -545,7 +544,7 @@ u32 amdgpu_atombios_crtc_set_dce_clock(struct amdgpu_device *adev,
args.v2_1.asParam.ulDCEClkFreq = cpu_to_le32(freq); /* 10kHz units */
args.v2_1.asParam.ucDCEClkType = clk_type;
args.v2_1.asParam.ucDCEClkSrc = clk_src;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
ret_freq = le32_to_cpu(args.v2_1.asParam.ulDCEClkFreq) * 10;
break;
default:
@@ -588,7 +587,7 @@ void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
struct amdgpu_atom_ss *ss)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u8 frev, crev;
int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
union set_pixel_clock args;
@@ -741,7 +740,7 @@ void amdgpu_atombios_crtc_program_pll(struct drm_crtc *crtc,
return;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc,
@@ -749,7 +748,7 @@ int amdgpu_atombios_crtc_prepare_pll(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
int encoder_mode = amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
@@ -818,7 +817,7 @@ void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
u32 pll_clock = mode->clock;
@@ -851,7 +850,7 @@ void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
pll->reference_div = amdgpu_crtc->pll_reference_div;
pll->post_div = amdgpu_crtc->pll_post_div;
- amdgpu_pll_compute(pll, amdgpu_crtc->adjusted_clock, &pll_clock,
+ amdgpu_pll_compute(adev, pll, amdgpu_crtc->adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div, &ref_div, &post_div);
amdgpu_atombios_crtc_program_ss(adev, ATOM_DISABLE, amdgpu_crtc->pll_id,
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index f81068ba4cc6..492813ab1b54 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -24,8 +24,10 @@
* Alex Deucher
* Jerome Glisse
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
+#include <drm/display/drm_dp_helper.h>
+
#include "amdgpu.h"
#include "atom.h"
@@ -34,7 +36,6 @@
#include "atombios_dp.h"
#include "amdgpu_connectors.h"
#include "amdgpu_atombios.h"
-#include <drm/drm_dp_helper.h>
/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
@@ -60,7 +61,7 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
u8 delay, u8 *ack)
{
struct drm_device *dev = chan->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
union aux_channel_transaction args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
@@ -82,7 +83,7 @@ static int amdgpu_atombios_dp_process_aux_ch(struct amdgpu_i2c_chan *chan,
args.v2.ucDelay = delay / 10;
args.v2.ucHPD_ID = chan->rec.hpd;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
*ack = args.v2.ucReplyStatus;
@@ -186,16 +187,12 @@ amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *m
void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector)
{
- int ret;
-
amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd;
- amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer;
- ret = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
- if (!ret)
- amdgpu_connector->ddc_bus->has_aux = true;
+ amdgpu_connector->ddc_bus->aux.drm_dev = amdgpu_connector->base.dev;
- WARN(ret, "drm_dp_aux_register_i2c_bus() failed with error %d\n", ret);
+ drm_dp_aux_init(&amdgpu_connector->ddc_bus->aux);
+ amdgpu_connector->ddc_bus->has_aux = true;
}
/***** general DP utility functions *****/
@@ -304,14 +301,14 @@ static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
args.ucLaneNum = lane_num;
args.ucStatus = 0;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
return args.ucStatus;
}
u8 amdgpu_atombios_dp_get_sinktype(struct amdgpu_connector *amdgpu_connector)
{
struct drm_device *dev = amdgpu_connector->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
return amdgpu_atombios_dp_encoder_service(adev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
amdgpu_connector->ddc_bus->rec.i2c_id, 0);
@@ -334,6 +331,22 @@ static void amdgpu_atombios_dp_probe_oui(struct amdgpu_connector *amdgpu_connect
buf[0], buf[1], buf[2]);
}
+static void amdgpu_atombios_dp_ds_ports(struct amdgpu_connector *amdgpu_connector)
+{
+ struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
+ int ret;
+
+ if (dig_connector->dpcd[DP_DPCD_REV] > 0x10) {
+ ret = drm_dp_dpcd_read(&amdgpu_connector->ddc_bus->aux,
+ DP_DOWNSTREAM_PORT_0,
+ dig_connector->downstream_ports,
+ DP_MAX_DOWNSTREAM_PORTS);
+ if (ret)
+ memset(dig_connector->downstream_ports, 0,
+ DP_MAX_DOWNSTREAM_PORTS);
+ }
+}
+
int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector)
{
struct amdgpu_connector_atom_dig *dig_connector = amdgpu_connector->con_priv;
@@ -349,7 +362,7 @@ int amdgpu_atombios_dp_get_dpcd(struct amdgpu_connector *amdgpu_connector)
dig_connector->dpcd);
amdgpu_atombios_dp_probe_oui(amdgpu_connector);
-
+ amdgpu_atombios_dp_ds_ports(amdgpu_connector);
return 0;
}
@@ -361,7 +374,6 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- struct amdgpu_connector_atom_dig *dig_connector;
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
u16 dp_bridge = amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector);
u8 tmp;
@@ -369,8 +381,6 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
if (!amdgpu_connector->con_priv)
return panel_mode;
- dig_connector = amdgpu_connector->con_priv;
-
if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
/* DP bridge chips */
if (drm_dp_dpcd_readb(&amdgpu_connector->ddc_bus->aux,
@@ -420,7 +430,7 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
}
int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector;
@@ -448,8 +458,8 @@ bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connect
u8 link_status[DP_LINK_STATUS_SIZE];
struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;
- if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status)
- <= 0)
+ if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux,
+ link_status) < 0)
return false;
if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
@@ -603,10 +613,10 @@ amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_i
dp_info->tries = 0;
voltage = 0xff;
while (1) {
- drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
+ drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
@@ -668,10 +678,10 @@ amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_i
dp_info->tries = 0;
channel_eq = false;
while (1) {
- drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
+ drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
- dp_info->link_status) <= 0) {
+ dp_info->link_status) < 0) {
DRM_ERROR("displayport link status failed\n");
break;
}
@@ -711,9 +721,8 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig;
struct amdgpu_connector *amdgpu_connector;
struct amdgpu_connector_atom_dig *dig_connector;
struct amdgpu_atombios_dp_link_train_info dp_info;
@@ -721,7 +730,6 @@ void amdgpu_atombios_dp_link_train(struct drm_encoder *encoder,
if (!amdgpu_encoder->enc_priv)
return;
- dig = amdgpu_encoder->enc_priv;
amdgpu_connector = to_amdgpu_connector(connector);
if (!amdgpu_connector->con_priv)
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.h b/drivers/gpu/drm/amd/amdgpu/atombios_dp.h
index f59d85eaddf0..3e24acf8133f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.h
@@ -32,7 +32,7 @@ int amdgpu_atombios_dp_get_panel_mode(struct drm_encoder *encoder,
void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
const struct drm_display_mode *mode);
int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connector);
void amdgpu_atombios_dp_set_rx_power_state(struct drm_connector *connector,
u8 power_state);
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 2af26d2da127..a51f3414b65d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -23,18 +23,23 @@
* Authors: Dave Airlie
* Alex Deucher
*/
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
+
+#include <linux/pci.h>
+
+#include <acpi/video.h>
+
+#include <drm/drm_edid.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_connectors.h"
+#include "amdgpu_display.h"
#include "atom.h"
#include "atombios_encoders.h"
#include "atombios_dp.h"
#include <linux/backlight.h>
#include "bif/bif_4_1_d.h"
-static u8
+u8
amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
{
u8 backlight_level;
@@ -48,7 +53,7 @@ amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
return backlight_level;
}
-static void
+void
amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
u8 backlight_level)
{
@@ -67,7 +72,7 @@ u8
amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
{
struct drm_device *dev = amdgpu_encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
return 0;
@@ -81,7 +86,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
{
struct drm_encoder *encoder = &amdgpu_encoder->base;
struct drm_device *dev = amdgpu_encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder_atom_dig *dig;
if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
@@ -115,8 +120,6 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
}
}
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-
static u8 amdgpu_atombios_encoder_backlight_level(struct backlight_device *bd)
{
u8 level;
@@ -149,7 +152,7 @@ amdgpu_atombios_encoder_get_backlight_brightness(struct backlight_device *bd)
struct amdgpu_backlight_privdata *pdata = bl_get_data(bd);
struct amdgpu_encoder *amdgpu_encoder = pdata->encoder;
struct drm_device *dev = amdgpu_encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
return amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
}
@@ -163,12 +166,11 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
struct drm_connector *drm_connector)
{
struct drm_device *dev = amdgpu_encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct backlight_device *bd;
struct backlight_properties props;
struct amdgpu_backlight_privdata *pdata;
struct amdgpu_encoder_atom_dig *dig;
- u8 backlight_level;
char bl_name[16];
/* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -182,7 +184,12 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
return;
if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
- return;
+ goto register_acpi_backlight;
+
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(dev, "Skipping amdgpu atom DIG backlight registration\n");
+ goto register_acpi_backlight;
+ }
pdata = kmalloc(sizeof(struct amdgpu_backlight_privdata), GFP_KERNEL);
if (!pdata) {
@@ -204,13 +211,11 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
pdata->encoder = amdgpu_encoder;
- backlight_level = amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
-
dig = amdgpu_encoder->enc_priv;
dig->bl_dev = bd;
bd->props.brightness = amdgpu_atombios_encoder_get_backlight_brightness(bd);
- bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.power = BACKLIGHT_POWER_ON;
backlight_update_status(bd);
DRM_INFO("amdgpu atom DIG backlight initialized\n");
@@ -220,13 +225,17 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode
error:
kfree(pdata);
return;
+
+register_acpi_backlight:
+ /* Try registering an ACPI video backlight device instead. */
+ acpi_video_register_backlight();
}
void
amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder)
{
struct drm_device *dev = amdgpu_encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct backlight_device *bd = NULL;
struct amdgpu_encoder_atom_dig *dig;
@@ -251,18 +260,6 @@ amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *amdgpu_encoder)
}
}
-#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
-
-void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *encoder)
-{
-}
-
-void amdgpu_atombios_encoder_fini_backlight(struct amdgpu_encoder *encoder)
-{
-}
-
-#endif
-
bool amdgpu_atombios_encoder_is_digital(struct drm_encoder *encoder)
{
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -316,7 +313,7 @@ static void
amdgpu_atombios_encoder_setup_dac(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
DAC_ENCODER_CONTROL_PS_ALLOCATION args;
int index = 0;
@@ -338,7 +335,7 @@ amdgpu_atombios_encoder_setup_dac(struct drm_encoder *encoder, int action)
args.ucDacStandard = ATOM_DAC1_PS2;
args.usPixelClock = cpu_to_le16(amdgpu_encoder->pixel_clock / 10);
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
@@ -379,7 +376,7 @@ static void
amdgpu_atombios_encoder_setup_dvo(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
union dvo_encoder_control args;
int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
@@ -435,7 +432,7 @@ amdgpu_atombios_encoder_setup_dvo(struct drm_encoder *encoder, int action)
break;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
@@ -469,7 +466,7 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
if (amdgpu_connector->use_digital &&
(amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE))
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
+ else if (connector->display_info.is_hdmi &&
(amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else if (amdgpu_connector->use_digital)
@@ -488,7 +485,7 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
if (amdgpu_audio != 0) {
if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
+ else if (connector->display_info.is_hdmi &&
(amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
@@ -496,10 +493,8 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
} else {
return ATOM_ENCODER_MODE_DVI;
}
- break;
case DRM_MODE_CONNECTOR_LVDS:
return ATOM_ENCODER_MODE_LVDS;
- break;
case DRM_MODE_CONNECTOR_DisplayPort:
dig_connector = amdgpu_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
@@ -508,7 +503,7 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
} else if (amdgpu_audio != 0) {
if (amdgpu_connector->audio == AMDGPU_AUDIO_ENABLE)
return ATOM_ENCODER_MODE_HDMI;
- else if (drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
+ else if (connector->display_info.is_hdmi &&
(amdgpu_connector->audio == AMDGPU_AUDIO_AUTO))
return ATOM_ENCODER_MODE_HDMI;
else
@@ -516,20 +511,16 @@ int amdgpu_atombios_encoder_get_encoder_mode(struct drm_encoder *encoder)
} else {
return ATOM_ENCODER_MODE_DVI;
}
- break;
case DRM_MODE_CONNECTOR_eDP:
return ATOM_ENCODER_MODE_DP;
case DRM_MODE_CONNECTOR_DVIA:
case DRM_MODE_CONNECTOR_VGA:
return ATOM_ENCODER_MODE_CRT;
- break;
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_9PinDIN:
/* fix me */
return ATOM_ENCODER_MODE_TV;
- /*return ATOM_ENCODER_MODE_CV;*/
- break;
}
}
@@ -570,7 +561,7 @@ amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
int action, int panel_mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -741,7 +732,7 @@ amdgpu_atombios_encoder_setup_dig_encoder(struct drm_encoder *encoder,
break;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
@@ -759,7 +750,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
uint8_t lane_num, uint8_t lane_set)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
@@ -771,7 +762,6 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
int dp_clock = 0;
int dp_lane_count = 0;
int connector_object_id = 0;
- int igp_lane_info = 0;
int dig_encoder = dig->dig_encoder;
int hpd_id = AMDGPU_HPD_NONE;
@@ -854,26 +844,6 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
- if ((adev->flags & AMD_IS_APU) &&
- (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
- if (is_dp ||
- !amdgpu_dig_monitor_is_duallink(encoder, amdgpu_encoder->pixel_clock)) {
- if (igp_lane_info & 0x1)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else if (igp_lane_info & 0x2)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
- else if (igp_lane_info & 0x4)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
- else if (igp_lane_info & 0x8)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
- } else {
- if (igp_lane_info & 0x3)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
- else if (igp_lane_info & 0xc)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
- }
- }
-
if (dig->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
else
@@ -1166,7 +1136,7 @@ amdgpu_atombios_encoder_setup_dig_transmitter(struct drm_encoder *encoder, int a
break;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
bool
@@ -1175,7 +1145,7 @@ amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector,
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_device *dev = amdgpu_connector->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
union dig_transmitter_control args;
int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
uint8_t frev, crev;
@@ -1194,7 +1164,7 @@ amdgpu_atombios_encoder_set_edp_panel_power(struct drm_connector *connector,
args.v1.ucAction = action;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
/* wait for the panel to power up */
if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
@@ -1222,7 +1192,7 @@ amdgpu_atombios_encoder_setup_external_encoder(struct drm_encoder *encoder,
int action)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder *ext_amdgpu_encoder = to_amdgpu_encoder(ext_encoder);
union external_encoder_control args;
@@ -1318,7 +1288,7 @@ amdgpu_atombios_encoder_setup_external_encoder(struct drm_encoder *encoder,
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
static void
@@ -1463,7 +1433,7 @@ void
amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
union crtc_source_param args;
@@ -1663,14 +1633,14 @@ amdgpu_atombios_encoder_set_crtc_source(struct drm_encoder *encoder)
return;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
/* This only needs to be called once at startup */
void
amdgpu_atombios_encoder_init_dig(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -1698,7 +1668,7 @@ amdgpu_atombios_encoder_dac_load_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
@@ -1736,7 +1706,7 @@ amdgpu_atombios_encoder_dac_load_detect(struct drm_encoder *encoder,
args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
}
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
return true;
} else
@@ -1748,7 +1718,7 @@ amdgpu_atombios_encoder_dac_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
uint32_t bios_0_scratch;
@@ -1787,7 +1757,7 @@ amdgpu_atombios_encoder_dig_detect(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct drm_encoder *ext_encoder = amdgpu_get_external_encoder(encoder);
@@ -1845,7 +1815,7 @@ amdgpu_atombios_encoder_set_bios_scratch_regs(struct drm_connector *connector,
bool connected)
{
struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_connector *amdgpu_connector =
to_amdgpu_connector(connector);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -1996,7 +1966,7 @@ struct amdgpu_encoder_atom_dig *
amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_mode_info *mode_info = &adev->mode_info;
int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
uint16_t data_offset, misc;
@@ -2094,24 +2064,25 @@ amdgpu_atombios_encoder_get_lcd_info(struct amdgpu_encoder *encoder)
case LCD_FAKE_EDID_PATCH_RECORD_TYPE:
fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record;
if (fake_edid_record->ucFakeEDIDLength) {
- struct edid *edid;
- int edid_size =
- max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength);
- edid = kmalloc(edid_size, GFP_KERNEL);
- if (edid) {
- memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
- fake_edid_record->ucFakeEDIDLength);
-
- if (drm_edid_is_valid(edid)) {
- adev->mode_info.bios_hardcoded_edid = edid;
- adev->mode_info.bios_hardcoded_edid_size = edid_size;
- } else
- kfree(edid);
- }
+ const struct drm_edid *edid;
+ int edid_size;
+
+ if (fake_edid_record->ucFakeEDIDLength == 128)
+ edid_size = fake_edid_record->ucFakeEDIDLength;
+ else
+ edid_size = fake_edid_record->ucFakeEDIDLength * 128;
+ edid = drm_edid_alloc(fake_edid_record->ucFakeEDIDString, edid_size);
+ if (drm_edid_valid(edid))
+ adev->mode_info.bios_hardcoded_edid = edid;
+ else
+ drm_edid_free(edid);
+ record += struct_size(fake_edid_record,
+ ucFakeEDIDString,
+ edid_size);
+ } else {
+ /* empty fake edid record must be 3 bytes long */
+ record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD) + 1;
}
- record += fake_edid_record->ucFakeEDIDLength ?
- fake_edid_record->ucFakeEDIDLength + 2 :
- sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
index 2bdec40515ce..f77cbdef679e 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
@@ -25,6 +25,11 @@
#define __ATOMBIOS_ENCODER_H__
u8
+amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev);
+void
+amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
+ u8 backlight_level);
+u8
amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
void
amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
index b374653bd6cf..a6501114322f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_i2c.c
@@ -22,7 +22,7 @@
* Authors: Alex Deucher
*
*/
-#include <drm/drmP.h>
+
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
@@ -40,7 +40,7 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
u8 *buf, u8 num)
{
struct drm_device *dev = chan->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
unsigned char *base;
@@ -65,15 +65,17 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
args.ucRegIndex = buf[0];
if (num)
num--;
- if (num)
- memcpy(&out, &buf[1], num);
+ if (num) {
+ if (buf) {
+ memcpy(&out, &buf[1], num);
+ } else {
+ DRM_ERROR("hw i2c: missing buf with num > 1\n");
+ r = -EINVAL;
+ goto done;
+ }
+ }
args.lpI2CDataOut = cpu_to_le16(out);
} else {
- if (num > ATOM_MAX_HW_I2C_READ) {
- DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
- r = -EINVAL;
- goto done;
- }
args.ucRegIndex = 0;
args.lpI2CDataOut = 0;
}
@@ -84,7 +86,7 @@ static int amdgpu_atombios_i2c_process_i2c_ch(struct amdgpu_i2c_chan *chan,
args.ucSlaveAddr = slave_addr << 1;
args.ucLineNumber = chan->rec.i2c_id;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
/* error */
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
@@ -157,7 +159,7 @@ u32 amdgpu_atombios_i2c_func(struct i2c_adapter *adap)
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
-void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device* adev, u8 slave_addr, u8 line_number, u8 offset, u8 data)
+void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device *adev, u8 slave_addr, u8 line_number, u8 offset, u8 data)
{
PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
@@ -170,5 +172,5 @@ void amdgpu_atombios_i2c_channel_trans(struct amdgpu_device* adev, u8 slave_addr
args.ucSlaveAddr = slave_addr;
args.ucLineNumber = line_number;
- amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
+ amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args, sizeof(args));
}
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
deleted file mode 100644
index 6dc1410b380f..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ /dev/null
@@ -1,7047 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "amdgpu_pm.h"
-#include "amdgpu_ucode.h"
-#include "cikd.h"
-#include "amdgpu_dpm.h"
-#include "ci_dpm.h"
-#include "gfx_v7_0.h"
-#include "atom.h"
-#include "amd_pcie.h"
-#include <linux/seq_file.h>
-
-#include "smu/smu_7_0_1_d.h"
-#include "smu/smu_7_0_1_sh_mask.h"
-
-#include "dce/dce_8_0_d.h"
-#include "dce/dce_8_0_sh_mask.h"
-
-#include "bif/bif_4_1_d.h"
-#include "bif/bif_4_1_sh_mask.h"
-
-#include "gca/gfx_7_2_d.h"
-#include "gca/gfx_7_2_sh_mask.h"
-
-#include "gmc/gmc_7_1_d.h"
-#include "gmc/gmc_7_1_sh_mask.h"
-
-MODULE_FIRMWARE("radeon/bonaire_smc.bin");
-MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
-MODULE_FIRMWARE("radeon/hawaii_smc.bin");
-MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
-
-#define MC_CG_ARB_FREQ_F0 0x0a
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define MC_CG_ARB_FREQ_F2 0x0c
-#define MC_CG_ARB_FREQ_F3 0x0d
-
-#define SMC_RAM_END 0x40000
-
-#define VOLTAGE_SCALE 4
-#define VOLTAGE_VID_OFFSET_SCALE1 625
-#define VOLTAGE_VID_OFFSET_SCALE2 100
-
-static const struct ci_pt_defaults defaults_hawaii_xt =
-{
- 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
- { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
- { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
-};
-
-static const struct ci_pt_defaults defaults_hawaii_pro =
-{
- 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
- { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
- { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
-};
-
-static const struct ci_pt_defaults defaults_bonaire_xt =
-{
- 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
- { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
- { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
-};
-
-#if 0
-static const struct ci_pt_defaults defaults_bonaire_pro =
-{
- 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
- { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
- { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
-};
-#endif
-
-static const struct ci_pt_defaults defaults_saturn_xt =
-{
- 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
- { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
- { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
-};
-
-#if 0
-static const struct ci_pt_defaults defaults_saturn_pro =
-{
- 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
- { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
- { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
-};
-#endif
-
-static const struct ci_pt_config_reg didt_config_ci[] =
-{
- { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
- { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
- { 0xFFFFFFFF }
-};
-
-static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
-{
- return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
-}
-
-#define MC_CG_ARB_FREQ_F0 0x0a
-#define MC_CG_ARB_FREQ_F1 0x0b
-#define MC_CG_ARB_FREQ_F2 0x0c
-#define MC_CG_ARB_FREQ_F3 0x0d
-
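-/* Copy the MC arbiter DRAM timing registers of set @arb_freq_src into set
- * @arb_freq_dest and ask the memory controller to switch to the destination
- * set via MC_CG_CONFIG / MC_ARB_CG. Only the F0 and F1 sets are handled.
- */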
-static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
- u32 arb_freq_src, u32 arb_freq_dest)
-{
- u32 mc_arb_dram_timing;
- u32 mc_arb_dram_timing2;
- u32 burst_time;
- u32 mc_cg_config;
-
- switch (arb_freq_src) {
- case MC_CG_ARB_FREQ_F0:
- mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
- mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
- burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
- MC_ARB_BURST_TIME__STATE0__SHIFT;
- break;
- case MC_CG_ARB_FREQ_F1:
- mc_arb_dram_timing = RREG32(mmMC_ARB_DRAM_TIMING_1);
- mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
- burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
- MC_ARB_BURST_TIME__STATE1__SHIFT;
- break;
- default:
- return -EINVAL;
- }
-
- switch (arb_freq_dest) {
- case MC_CG_ARB_FREQ_F0:
- WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
- WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
- WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
- ~MC_ARB_BURST_TIME__STATE0_MASK);
- break;
- case MC_CG_ARB_FREQ_F1:
- WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
- WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
- WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
- ~MC_ARB_BURST_TIME__STATE1_MASK);
- break;
- default:
- return -EINVAL;
- }
-
- mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
- WREG32(mmMC_CG_CONFIG, mc_cg_config);
- WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
- ~MC_ARB_CG__CG_ARB_REQ_MASK);
-
- return 0;
-}
-
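-/* Map a DDR3 memory clock to one of 16 MC parameter indices. Clock values
- * appear to be in 10 kHz units, so index 1 starts at 100 MHz with one step
- * per 50 MHz; everything at or above 800 MHz uses index 0x0f.
- */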
-static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
-{
- u8 mc_para_index;
-
- if (memory_clock < 10000)
- mc_para_index = 0;
- else if (memory_clock >= 80000)
- mc_para_index = 0x0f;
- else
- mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
- return mc_para_index;
-}
-
-static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
-{
- u8 mc_para_index;
-
- if (strobe_mode) {
- if (memory_clock < 12500)
- mc_para_index = 0x00;
- else if (memory_clock > 47500)
- mc_para_index = 0x0f;
- else
- mc_para_index = (u8)((memory_clock - 10000) / 2500);
- } else {
- if (memory_clock < 65000)
- mc_para_index = 0x00;
- else if (memory_clock > 135000)
- mc_para_index = 0x0f;
- else
- mc_para_index = (u8)((memory_clock - 60000) / 5000);
- }
- return mc_para_index;
-}
-
-static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
- u32 max_voltage_steps,
- struct atom_voltage_table *voltage_table)
-{
- unsigned int i, diff;
-
- if (voltage_table->count <= max_voltage_steps)
- return;
-
- diff = voltage_table->count - max_voltage_steps;
-
- for (i = 0; i < max_voltage_steps; i++)
- voltage_table->entries[i] = voltage_table->entries[i + diff];
-
- voltage_table->count = max_voltage_steps;
-}
-
-static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
- struct atom_voltage_table_entry *voltage_table,
- u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
-static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
-static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
- u32 target_tdp);
-static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
-static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
-static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
-
-static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg, u32 parameter);
-static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
-static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
-
-static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = adev->pm.dpm.priv;
-
- return pi;
-}
-
-static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
-{
- struct ci_ps *ps = rps->ps_priv;
-
- return ps;
-}
-
-static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- switch (adev->pdev->device) {
- case 0x6649:
- case 0x6650:
- case 0x6651:
- case 0x6658:
- case 0x665C:
- case 0x665D:
- default:
- pi->powertune_defaults = &defaults_bonaire_xt;
- break;
- case 0x6640:
- case 0x6641:
- case 0x6646:
- case 0x6647:
- pi->powertune_defaults = &defaults_saturn_xt;
- break;
- case 0x67B8:
- case 0x67B0:
- pi->powertune_defaults = &defaults_hawaii_xt;
- break;
- case 0x67BA:
- case 0x67B1:
- pi->powertune_defaults = &defaults_hawaii_pro;
- break;
- case 0x67A0:
- case 0x67A1:
- case 0x67A2:
- case 0x67A8:
- case 0x67A9:
- case 0x67AA:
- case 0x67B9:
- case 0x67BE:
- pi->powertune_defaults = &defaults_bonaire_xt;
- break;
- }
-
- pi->dte_tj_offset = 0;
-
- pi->caps_power_containment = true;
- pi->caps_cac = false;
- pi->caps_sq_ramping = false;
- pi->caps_db_ramping = false;
- pi->caps_td_ramping = false;
- pi->caps_tcp_ramping = false;
-
- if (pi->caps_power_containment) {
- pi->caps_cac = true;
- if (adev->asic_type == CHIP_HAWAII)
- pi->enable_bapm_feature = false;
- else
- pi->enable_bapm_feature = true;
- pi->enable_tdc_limit_feature = true;
- pi->enable_pkg_pwr_tracking_feature = true;
- }
-}
-
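-/* Convert a VDDC value in mV to an SVI2 VID code: vid = (1550 mV - vddc) / 6.25 mV,
- * written here as (6200 - vddc * VOLTAGE_SCALE) / 25 (VOLTAGE_SCALE is assumed
- * to be 4 in the SMU7 headers).
- */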
-static u8 ci_convert_to_vid(u16 vddc)
-{
- return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
-}
-
-static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
- u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
- u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
- u32 i;
-
- if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
- return -EINVAL;
- if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
- return -EINVAL;
- if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
- adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
- return -EINVAL;
-
- for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
- lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
- hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
- hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
- } else {
- lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
- hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
- }
- }
- return 0;
-}
-
-static int ci_populate_vddc_vid(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u8 *vid = pi->smc_powertune_table.VddCVid;
- u32 i;
-
- if (pi->vddc_voltage_table.count > 8)
- return -EINVAL;
-
- for (i = 0; i < pi->vddc_voltage_table.count; i++)
- vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
-
- return 0;
-}
-
-static int ci_populate_svi_load_line(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
-
- pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
- pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
- pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
- pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
-
- return 0;
-}
-
-static int ci_populate_tdc_limit(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
- u16 tdc_limit;
-
- tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
- pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
- pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
- pt_defaults->tdc_vddc_throttle_release_limit_perc;
- pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
-
- return 0;
-}
-
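-/* Read a dword at the PmFuseTable/TdcWaterfallCtl offset in SMC SRAM; on
- * success the value is immediately overwritten with the driver default, so
- * the read mainly confirms that SMC SRAM is reachable.
- */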
-static int ci_populate_dw8(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
- int ret;
-
- ret = amdgpu_ci_read_smc_sram_dword(adev,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, PmFuseTable) +
- offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
- (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
- pi->sram_end);
- if (ret)
- return -EINVAL;
- else
- pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
-
- return 0;
-}
-
-static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
- (adev->pm.dpm.fan.fan_output_sensitivity == 0))
- adev->pm.dpm.fan.fan_output_sensitivity =
- adev->pm.dpm.fan.default_fan_output_sensitivity;
-
- pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
- cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);
-
- return 0;
-}
-
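-/* Scan the BAPM hi/lo VID arrays for their smallest and largest non-zero
- * entries and record them as the GNB LPML min/max VIDs.
- */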
-static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
- u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
- int i, min, max;
-
- min = max = hi_vid[0];
- for (i = 0; i < 8; i++) {
- if (0 != hi_vid[i]) {
- if (min > hi_vid[i])
- min = hi_vid[i];
- if (max < hi_vid[i])
- max = hi_vid[i];
- }
-
- if (0 != lo_vid[i]) {
- if (min > lo_vid[i])
- min = lo_vid[i];
- if (max < lo_vid[i])
- max = lo_vid[i];
- }
- }
-
- if ((min == 0) || (max == 0))
- return -EINVAL;
- pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
- pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
-
- return 0;
-}
-
-static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u16 hi_sidd, lo_sidd;
- struct amdgpu_cac_tdp_table *cac_tdp_table =
- adev->pm.dpm.dyn_state.cac_tdp_table;
-
- hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
- lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
-
- pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
- pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
-
- return 0;
-}
-
-static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
- SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
- struct amdgpu_cac_tdp_table *cac_tdp_table =
- adev->pm.dpm.dyn_state.cac_tdp_table;
- struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
- int i, j, k;
- const u16 *def1;
- const u16 *def2;
-
- dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
- dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
-
- dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
- dpm_table->GpuTjMax =
- (u8)(pi->thermal_temp_setting.temperature_high / 1000);
- dpm_table->GpuTjHyst = 8;
-
- dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
-
- if (ppm) {
- dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
- dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
- } else {
- dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
- dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
- }
-
- dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
- def1 = pt_defaults->bapmti_r;
- def2 = pt_defaults->bapmti_rc;
-
- for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
- for (j = 0; j < SMU7_DTE_SOURCES; j++) {
- for (k = 0; k < SMU7_DTE_SINKS; k++) {
- dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
- dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
- def1++;
- def2++;
- }
- }
- }
-
- return 0;
-}
-
-static int ci_populate_pm_base(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 pm_fuse_table_offset;
- int ret;
-
- if (pi->caps_power_containment) {
- ret = amdgpu_ci_read_smc_sram_dword(adev,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, PmFuseTable),
- &pm_fuse_table_offset, pi->sram_end);
- if (ret)
- return ret;
- ret = ci_populate_bapm_vddc_vid_sidd(adev);
- if (ret)
- return ret;
- ret = ci_populate_vddc_vid(adev);
- if (ret)
- return ret;
- ret = ci_populate_svi_load_line(adev);
- if (ret)
- return ret;
- ret = ci_populate_tdc_limit(adev);
- if (ret)
- return ret;
- ret = ci_populate_dw8(adev);
- if (ret)
- return ret;
- ret = ci_populate_fuzzy_fan(adev);
- if (ret)
- return ret;
- ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
- if (ret)
- return ret;
- ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
- if (ret)
- return ret;
- ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
- (u8 *)&pi->smc_powertune_table,
- sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
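-/* Set or clear DIDT_CTRL_EN in each DIDT block (SQ, DB, TD, TCP) whose
- * ramping capability is enabled in ci_power_info.
- */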
-static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 data;
-
- if (pi->caps_sq_ramping) {
- data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
- if (enable)
- data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
- }
-
- if (pi->caps_db_ramping) {
- data = RREG32_DIDT(ixDIDT_DB_CTRL0);
- if (enable)
- data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_DB_CTRL0, data);
- }
-
- if (pi->caps_td_ramping) {
- data = RREG32_DIDT(ixDIDT_TD_CTRL0);
- if (enable)
- data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_TD_CTRL0, data);
- }
-
- if (pi->caps_tcp_ramping) {
- data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
- if (enable)
- data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
- else
- data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
- WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
- }
-}
-
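-/* Walk a ci_pt_config_reg table terminated by an offset of 0xFFFFFFFF.
- * CACHE entries accumulate field values that are OR'ed into the next
- * non-cache entry's read-modify-write; the entry type selects the SMC,
- * DIDT or plain MMIO register space.
- */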
-static int ci_program_pt_config_registers(struct amdgpu_device *adev,
- const struct ci_pt_config_reg *cac_config_regs)
-{
- const struct ci_pt_config_reg *config_regs = cac_config_regs;
- u32 data;
- u32 cache = 0;
-
- if (config_regs == NULL)
- return -EINVAL;
-
- while (config_regs->offset != 0xFFFFFFFF) {
- if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
- cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- } else {
- switch (config_regs->type) {
- case CISLANDS_CONFIGREG_SMC_IND:
- data = RREG32_SMC(config_regs->offset);
- break;
- case CISLANDS_CONFIGREG_DIDT_IND:
- data = RREG32_DIDT(config_regs->offset);
- break;
- default:
- data = RREG32(config_regs->offset);
- break;
- }
-
- data &= ~config_regs->mask;
- data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
- data |= cache;
-
- switch (config_regs->type) {
- case CISLANDS_CONFIGREG_SMC_IND:
- WREG32_SMC(config_regs->offset, data);
- break;
- case CISLANDS_CONFIGREG_DIDT_IND:
- WREG32_DIDT(config_regs->offset, data);
- break;
- default:
- WREG32(config_regs->offset, data);
- break;
- }
- cache = 0;
- }
- config_regs++;
- }
- return 0;
-}
-
-static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret;
-
- if (pi->caps_sq_ramping || pi->caps_db_ramping ||
- pi->caps_td_ramping || pi->caps_tcp_ramping) {
- adev->gfx.rlc.funcs->enter_safe_mode(adev);
-
- if (enable) {
- ret = ci_program_pt_config_registers(adev, didt_config_ci);
- if (ret) {
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
- return ret;
- }
- }
-
- ci_do_enable_didt(adev, enable);
-
- adev->gfx.rlc.funcs->exit_safe_mode(adev);
- }
-
- return 0;
-}
-
-static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result;
- int ret = 0;
-
- if (enable) {
- pi->power_containment_features = 0;
- if (pi->caps_power_containment) {
- if (pi->enable_bapm_feature) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
- if (smc_result != PPSMC_Result_OK)
- ret = -EINVAL;
- else
- pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
- }
-
- if (pi->enable_tdc_limit_feature) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
- if (smc_result != PPSMC_Result_OK)
- ret = -EINVAL;
- else
- pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
- }
-
- if (pi->enable_pkg_pwr_tracking_feature) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
- if (smc_result != PPSMC_Result_OK) {
- ret = -EINVAL;
- } else {
- struct amdgpu_cac_tdp_table *cac_tdp_table =
- adev->pm.dpm.dyn_state.cac_tdp_table;
- u32 default_pwr_limit =
- (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
-
- pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
-
- ci_set_power_limit(adev, default_pwr_limit);
- }
- }
- }
- } else {
- if (pi->caps_power_containment && pi->power_containment_features) {
- if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);
-
- if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
-
- if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
- pi->power_containment_features = 0;
- }
- }
-
- return ret;
-}
-
-static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result;
- int ret = 0;
-
- if (pi->caps_cac) {
- if (enable) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
- if (smc_result != PPSMC_Result_OK) {
- ret = -EINVAL;
- pi->cac_enabled = false;
- } else {
- pi->cac_enabled = true;
- }
- } else if (pi->cac_enabled) {
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
- pi->cac_enabled = false;
- }
- }
-
- return ret;
-}
-
-static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
- bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result = PPSMC_Result_OK;
-
- if (pi->thermal_sclk_dpm_enabled) {
- if (enable)
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
- else
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
- }
-
- if (smc_result == PPSMC_Result_OK)
- return 0;
- else
- return -EINVAL;
-}
-
-static int ci_power_control_set_level(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_cac_tdp_table *cac_tdp_table =
- adev->pm.dpm.dyn_state.cac_tdp_table;
- s32 adjust_percent;
- s32 target_tdp;
- int ret = 0;
- bool adjust_polarity = false; /* ??? */
-
- if (pi->caps_power_containment) {
- adjust_percent = adjust_polarity ?
- adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
- target_tdp = ((100 + adjust_percent) *
- (s32)cac_tdp_table->configurable_tdp) / 100;
-
- ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
- }
-
- return ret;
-}
-
-static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- pi->uvd_power_gated = gate;
-
- if (gate) {
- /* stop the UVD block */
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_GATE);
- ci_update_uvd_dpm(adev, gate);
- } else {
- amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
- AMD_PG_STATE_UNGATE);
- ci_update_uvd_dpm(adev, gate);
- }
-}
-
-static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
-{
- u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
- u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
-
- return vblank_time < switch_limit;
-}
-
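-/* Clamp the requested power state to the AC/DC clock and voltage limits,
- * raise the sclk/mclk floors for the display configuration and any active
- * VCE state, and pin mclk to the high level when memory-clock switching has
- * to be avoided (more than one active CRTC or a too-short vblank).
- */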
-static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct ci_ps *ps = ci_get_ps(rps);
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_clock_and_voltage_limits *max_limits;
- bool disable_mclk_switching;
- u32 sclk, mclk;
- int i;
-
- if (rps->vce_active) {
- rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
- rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
- } else {
- rps->evclk = 0;
- rps->ecclk = 0;
- }
-
- if ((adev->pm.dpm.new_active_crtc_count > 1) ||
- ci_dpm_vblank_too_short(adev))
- disable_mclk_switching = true;
- else
- disable_mclk_switching = false;
-
- if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
- pi->battery_state = true;
- else
- pi->battery_state = false;
-
- if (adev->pm.dpm.ac_power)
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- else
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
-
- if (!adev->pm.dpm.ac_power) {
- for (i = 0; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].mclk > max_limits->mclk)
- ps->performance_levels[i].mclk = max_limits->mclk;
- if (ps->performance_levels[i].sclk > max_limits->sclk)
- ps->performance_levels[i].sclk = max_limits->sclk;
- }
- }
-
- /* XXX validate the min clocks required for display */
-
- if (disable_mclk_switching) {
- mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
- sclk = ps->performance_levels[0].sclk;
- } else {
- mclk = ps->performance_levels[0].mclk;
- sclk = ps->performance_levels[0].sclk;
- }
-
- if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
- sclk = adev->pm.pm_display_cfg.min_core_set_clock;
-
- if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
- mclk = adev->pm.pm_display_cfg.min_mem_set_clock;
-
- if (rps->vce_active) {
- if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
- sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
- if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
- mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
- }
-
- ps->performance_levels[0].sclk = sclk;
- ps->performance_levels[0].mclk = mclk;
-
- if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
- ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
-
- if (disable_mclk_switching) {
- if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
- ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
- } else {
- if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
- ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
- }
-}
-
-static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
- int min_temp, int max_temp)
-{
- int low_temp = 0 * 1000;
- int high_temp = 255 * 1000;
- u32 tmp;
-
- if (low_temp < min_temp)
- low_temp = min_temp;
- if (high_temp > max_temp)
- high_temp = max_temp;
- if (high_temp < low_temp) {
- DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
- return -EINVAL;
- }
-
- tmp = RREG32_SMC(ixCG_THERMAL_INT);
- tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
- tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
- ((low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT);
- WREG32_SMC(ixCG_THERMAL_INT, tmp);
-
-#if 0
- /* XXX: need to figure out how to handle this properly */
- tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
- tmp &= DIG_THERM_DPM_MASK;
- tmp |= DIG_THERM_DPM(high_temp / 1000);
- WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
-#endif
-
- adev->pm.dpm.thermal.min_temp = low_temp;
- adev->pm.dpm.thermal.max_temp = high_temp;
- return 0;
-}
-
-static int ci_thermal_enable_alert(struct amdgpu_device *adev,
- bool enable)
-{
- u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
- PPSMC_Result result;
-
- if (enable) {
- thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
- CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
- WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
- result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
- if (result != PPSMC_Result_OK) {
- DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
- return -EINVAL;
- }
- } else {
- thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
- CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
- result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
- if (result != PPSMC_Result_OK) {
- DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp;
-
- if (pi->fan_ctrl_is_in_default_mode) {
- tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
- >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
- pi->fan_ctrl_default_mode = tmp;
- tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
- >> CG_FDO_CTRL2__TMIN__SHIFT;
- pi->t_min = tmp;
- pi->fan_ctrl_is_in_default_mode = false;
- }
-
- tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
- tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
- WREG32_SMC(ixCG_FDO_CTRL2, tmp);
-
- tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
- tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
- WREG32_SMC(ixCG_FDO_CTRL2, tmp);
-}
-
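-/* Build the SMC fan table from the VBIOS fan profile: temperatures are
- * converted from 0.01 degC to degC, PWM points are rescaled to the
- * controller's duty100 range, and the slopes are the rounded value of
- * 16 * duty100 * delta_pwm / delta_temp / 100.
- */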
-static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
- u32 duty100;
- u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
- u16 fdo_min, slope1, slope2;
- u32 reference_clock, tmp;
- int ret;
- u64 tmp64;
-
- if (!pi->fan_table_start) {
- adev->pm.dpm.fan.ucode_fan_control = false;
- return 0;
- }
-
- duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
- >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
-
- if (duty100 == 0) {
- adev->pm.dpm.fan.ucode_fan_control = false;
- return 0;
- }
-
- tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
- do_div(tmp64, 10000);
- fdo_min = (u16)tmp64;
-
- t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
- t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
-
- pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
- pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
-
- slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
- slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
-
- fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
- fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
- fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
-
- fan_table.Slope1 = cpu_to_be16(slope1);
- fan_table.Slope2 = cpu_to_be16(slope2);
-
- fan_table.FdoMin = cpu_to_be16(fdo_min);
-
- fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
-
- fan_table.HystUp = cpu_to_be16(1);
-
- fan_table.HystSlope = cpu_to_be16(1);
-
- fan_table.TempRespLim = cpu_to_be16(5);
-
- reference_clock = amdgpu_asic_get_xclk(adev);
-
- fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
- reference_clock) / 1600);
-
- fan_table.FdoMax = cpu_to_be16((u16)duty100);
-
- tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
- >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
- fan_table.TempSrc = (u8)tmp;
-
- ret = amdgpu_ci_copy_bytes_to_smc(adev,
- pi->fan_table_start,
- (u8 *)(&fan_table),
- sizeof(fan_table),
- pi->sram_end);
-
- if (ret) {
- DRM_ERROR("Failed to load fan table to the SMC.");
- adev->pm.dpm.fan.ucode_fan_control = false;
- }
-
- return 0;
-}
-
-static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result ret;
-
- if (pi->caps_od_fuzzy_fan_control_support) {
- ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_StartFanControl,
- FAN_CONTROL_FUZZY);
- if (ret != PPSMC_Result_OK)
- return -EINVAL;
- ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SetFanPwmMax,
- adev->pm.dpm.fan.default_max_fan_pwm);
- if (ret != PPSMC_Result_OK)
- return -EINVAL;
- } else {
- ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_StartFanControl,
- FAN_CONTROL_TABLE);
- if (ret != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- pi->fan_is_controlled_by_smc = true;
- return 0;
-}
-
-static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
-{
- PPSMC_Result ret;
- struct ci_power_info *pi = ci_get_pi(adev);
-
- ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
- if (ret == PPSMC_Result_OK) {
- pi->fan_is_controlled_by_smc = false;
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
- u32 *speed)
-{
- u32 duty, duty100;
- u64 tmp64;
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
- >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
- duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
- >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;
-
- if (duty100 == 0)
- return -EINVAL;
-
- tmp64 = (u64)duty * 100;
- do_div(tmp64, duty100);
- *speed = (u32)tmp64;
-
- if (*speed > 100)
- *speed = 100;
-
- return 0;
-}
-
-static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
- u32 speed)
-{
- u32 tmp;
- u32 duty, duty100;
- u64 tmp64;
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- if (pi->fan_is_controlled_by_smc)
- return -EINVAL;
-
- if (speed > 100)
- return -EINVAL;
-
- duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
- >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
-
- if (duty100 == 0)
- return -EINVAL;
-
- tmp64 = (u64)speed * duty100;
- do_div(tmp64, 100);
- duty = (u32)tmp64;
-
- tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
- tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
- WREG32_SMC(ixCG_FDO_CTRL0, tmp);
-
- return 0;
-}
-
-static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
-{
- switch (mode) {
- case AMD_FAN_CTRL_NONE:
- if (adev->pm.dpm.fan.ucode_fan_control)
- ci_fan_ctrl_stop_smc_fan_control(adev);
- ci_dpm_set_fan_speed_percent(adev, 100);
- break;
- case AMD_FAN_CTRL_MANUAL:
- if (adev->pm.dpm.fan.ucode_fan_control)
- ci_fan_ctrl_stop_smc_fan_control(adev);
- break;
- case AMD_FAN_CTRL_AUTO:
- if (adev->pm.dpm.fan.ucode_fan_control)
- ci_thermal_start_smc_fan_control(adev);
- break;
- default:
- break;
- }
-}
-
-static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (pi->fan_is_controlled_by_smc)
- return AMD_FAN_CTRL_AUTO;
- else
- return AMD_FAN_CTRL_MANUAL;
-}
-
-#if 0
-static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
- u32 *speed)
-{
- u32 tach_period;
- u32 xclk = amdgpu_asic_get_xclk(adev);
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- if (adev->pm.fan_pulses_per_revolution == 0)
- return -ENOENT;
-
- tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
- >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
- if (tach_period == 0)
- return -ENOENT;
-
- *speed = 60 * xclk * 10000 / tach_period;
-
- return 0;
-}
-
-static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
- u32 speed)
-{
- u32 tach_period, tmp;
- u32 xclk = amdgpu_asic_get_xclk(adev);
-
- if (adev->pm.no_fan)
- return -ENOENT;
-
- if (adev->pm.fan_pulses_per_revolution == 0)
- return -ENOENT;
-
- if ((speed < adev->pm.fan_min_rpm) ||
- (speed > adev->pm.fan_max_rpm))
- return -EINVAL;
-
- if (adev->pm.dpm.fan.ucode_fan_control)
- ci_fan_ctrl_stop_smc_fan_control(adev);
-
- tach_period = 60 * xclk * 10000 / (8 * speed);
- tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
- tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
- WREG32_SMC(ixCG_TACH_CTRL, tmp);
-
- ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
-
- return 0;
-}
-#endif
-
-static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp;
-
- if (!pi->fan_ctrl_is_in_default_mode) {
- tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
- tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
- WREG32_SMC(ixCG_FDO_CTRL2, tmp);
-
- tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
- tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
- WREG32_SMC(ixCG_FDO_CTRL2, tmp);
- pi->fan_ctrl_is_in_default_mode = true;
- }
-}
-
-static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
-{
- if (adev->pm.dpm.fan.ucode_fan_control) {
- ci_fan_ctrl_start_smc_fan_control(adev);
- ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
- }
-}
-
-static void ci_thermal_initialize(struct amdgpu_device *adev)
-{
- u32 tmp;
-
- if (adev->pm.fan_pulses_per_revolution) {
- tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
- tmp |= (adev->pm.fan_pulses_per_revolution - 1)
- << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
- WREG32_SMC(ixCG_TACH_CTRL, tmp);
- }
-
- tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
- tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
- WREG32_SMC(ixCG_FDO_CTRL2, tmp);
-}
-
-static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
-{
- int ret;
-
- ci_thermal_initialize(adev);
- ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- ret = ci_thermal_enable_alert(adev, true);
- if (ret)
- return ret;
- if (adev->pm.dpm.fan.ucode_fan_control) {
- ret = ci_thermal_setup_fan_table(adev);
- if (ret)
- return ret;
- ci_thermal_start_smc_fan_control(adev);
- }
-
- return 0;
-}
-
-static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
-{
- if (!adev->pm.no_fan)
- ci_fan_ctrl_set_default_mode(adev);
-}
-
-static int ci_read_smc_soft_register(struct amdgpu_device *adev,
- u16 reg_offset, u32 *value)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- return amdgpu_ci_read_smc_sram_dword(adev,
- pi->soft_regs_start + reg_offset,
- value, pi->sram_end);
-}
-
-static int ci_write_smc_soft_register(struct amdgpu_device *adev,
- u16 reg_offset, u32 value)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- return amdgpu_ci_write_smc_sram_dword(adev,
- pi->soft_regs_start + reg_offset,
- value, pi->sram_end);
-}
-
-static void ci_init_fps_limits(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
-
- if (pi->caps_fps) {
- u16 tmp;
-
- tmp = 45;
- table->FpsHighT = cpu_to_be16(tmp);
-
- tmp = 30;
- table->FpsLowT = cpu_to_be16(tmp);
- }
-}
-
-static int ci_update_sclk_t(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret = 0;
- u32 low_sclk_interrupt_t = 0;
-
- if (pi->caps_sclk_throttle_low_notification) {
- low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
-
- ret = amdgpu_ci_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
- (u8 *)&low_sclk_interrupt_t,
- sizeof(u32), pi->sram_end);
-
- }
-
- return ret;
-}
-
-static void ci_get_leakage_voltages(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u16 leakage_id, virtual_voltage_id;
- u16 vddc, vddci;
- int i;
-
- pi->vddc_leakage.count = 0;
- pi->vddci_leakage.count = 0;
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
- for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
- virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
- if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
- continue;
- if (vddc != 0 && vddc != virtual_voltage_id) {
- pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
- pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
- pi->vddc_leakage.count++;
- }
- }
- } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
- for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
- virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
- if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
- virtual_voltage_id,
- leakage_id) == 0) {
- if (vddc != 0 && vddc != virtual_voltage_id) {
- pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
- pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
- pi->vddc_leakage.count++;
- }
- if (vddci != 0 && vddci != virtual_voltage_id) {
- pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
- pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
- pi->vddci_leakage.count++;
- }
- }
- }
- }
-}
-
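-/* When an auto-throttle source is active and thermal protection is enabled
- * in ci_power_info, clear THERMAL_PROTECTION_DIS in GENERAL_PWRMGT; otherwise
- * set it. Selecting the DPM event source itself is still stubbed out (#if 0).
- */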
-static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- bool want_thermal_protection;
- enum amdgpu_dpm_event_src dpm_event_src;
- u32 tmp;
-
- switch (sources) {
- case 0:
- default:
- want_thermal_protection = false;
- break;
- case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
- want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
- break;
- case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
- want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
- break;
- case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
- (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
- want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
- break;
- }
-
- if (want_thermal_protection) {
-#if 0
- /* XXX: need to figure out how to handle this properly */
- tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
- tmp &= DPM_EVENT_SRC_MASK;
- tmp |= DPM_EVENT_SRC(dpm_event_src);
- WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
-#endif
-
- tmp = RREG32_SMC(ixGENERAL_PWRMGT);
- if (pi->thermal_protection)
- tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
- else
- tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
- } else {
- tmp = RREG32_SMC(ixGENERAL_PWRMGT);
- tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
- }
-}
-
-static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
- enum amdgpu_dpm_auto_throttle_src source,
- bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (enable) {
- if (!(pi->active_auto_throttle_sources & (1 << source))) {
- pi->active_auto_throttle_sources |= 1 << source;
- ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
- }
- } else {
- if (pi->active_auto_throttle_sources & (1 << source)) {
- pi->active_auto_throttle_sources &= ~(1 << source);
- ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
- }
- }
-}
-
-static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
-{
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
-}
-
-static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result;
-
- if (!pi->need_update_smu7_dpm_table)
- return 0;
-
- if ((!pi->sclk_dpm_key_disabled) &&
- (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- if ((!pi->mclk_dpm_key_disabled) &&
- (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- pi->need_update_smu7_dpm_table = 0;
- return 0;
-}
-
-static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result;
-
- if (enable) {
- if (!pi->sclk_dpm_key_disabled) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- if (!pi->mclk_dpm_key_disabled) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
-
- WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
- ~MC_SEQ_CNTL_3__CAC_EN_MASK);
-
- WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
- WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
- WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
-
- udelay(10);
-
- WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
- WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
- WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
- }
- } else {
- if (!pi->sclk_dpm_key_disabled) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- if (!pi->mclk_dpm_key_disabled) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int ci_start_dpm(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result;
- int ret;
- u32 tmp;
-
- tmp = RREG32_SMC(ixGENERAL_PWRMGT);
- tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
-
- tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
- tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
- WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
-
- ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
-
- WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
-
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
-
- ret = ci_enable_sclk_mclk_dpm(adev, true);
- if (ret)
- return ret;
-
- if (!pi->pcie_dpm_key_disabled) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result;
-
- if (!pi->need_update_smu7_dpm_table)
- return 0;
-
- if ((!pi->sclk_dpm_key_disabled) &&
- (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- if ((!pi->mclk_dpm_key_disabled) &&
- (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ci_stop_dpm(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result smc_result;
- int ret;
- u32 tmp;
-
- tmp = RREG32_SMC(ixGENERAL_PWRMGT);
- tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
-
- tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
- tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
- WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
-
- if (!pi->pcie_dpm_key_disabled) {
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- ret = ci_enable_sclk_mclk_dpm(adev, false);
- if (ret)
- return ret;
-
- smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
-
- return 0;
-}
-
-static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
-{
- u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
-
- if (enable)
- tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
- else
- tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
- WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
-}
-
-#if 0
-static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
- bool ac_power)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_cac_tdp_table *cac_tdp_table =
- adev->pm.dpm.dyn_state.cac_tdp_table;
- u32 power_limit;
-
- if (ac_power)
- power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
- else
- power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
-
- ci_set_power_limit(adev, power_limit);
-
- if (pi->caps_automatic_dc_transition) {
- if (ac_power)
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
- else
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
- }
-
- return 0;
-}
-#endif
-
-static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg, u32 parameter)
-{
- WREG32(mmSMC_MSG_ARG_0, parameter);
- return amdgpu_ci_send_msg_to_smc(adev, msg);
-}
-
-static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
- PPSMC_Msg msg, u32 *parameter)
-{
- PPSMC_Result smc_result;
-
- smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
-
- if ((smc_result == PPSMC_Result_OK) && parameter)
- *parameter = RREG32(mmSMC_MSG_ARG_0);
-
- return smc_result;
-}
-
-static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (!pi->sclk_dpm_key_disabled) {
- PPSMC_Result smc_result =
- amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (!pi->mclk_dpm_key_disabled) {
- PPSMC_Result smc_result =
- amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (!pi->pcie_dpm_key_disabled) {
- PPSMC_Result smc_result =
- amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
- PPSMC_Result smc_result =
- amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
- u32 target_tdp)
-{
- PPSMC_Result smc_result =
- amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- return 0;
-}
-
-#if 0
-static int ci_set_boot_state(struct amdgpu_device *adev)
-{
- return ci_enable_sclk_mclk_dpm(adev, false);
-}
-#endif
-
-static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
-{
- u32 sclk_freq;
- PPSMC_Result smc_result =
- amdgpu_ci_send_msg_to_smc_return_parameter(adev,
- PPSMC_MSG_API_GetSclkFrequency,
- &sclk_freq);
- if (smc_result != PPSMC_Result_OK)
- sclk_freq = 0;
-
- return sclk_freq;
-}
-
-static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
-{
- u32 mclk_freq;
- PPSMC_Result smc_result =
- amdgpu_ci_send_msg_to_smc_return_parameter(adev,
- PPSMC_MSG_API_GetMclkFrequency,
- &mclk_freq);
- if (smc_result != PPSMC_Result_OK)
- mclk_freq = 0;
-
- return mclk_freq;
-}
-
-static void ci_dpm_start_smc(struct amdgpu_device *adev)
-{
- int i;
-
- amdgpu_ci_program_jump_on_start(adev);
- amdgpu_ci_start_smc_clock(adev);
- amdgpu_ci_start_smc(adev);
- for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
- break;
- }
-}
-
-static void ci_dpm_stop_smc(struct amdgpu_device *adev)
-{
- amdgpu_ci_reset_smc(adev);
- amdgpu_ci_stop_smc_clock(adev);
-}
-
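-/* Read the SMC firmware header to locate the DPM table, soft registers,
- * MC register table, fan table and MC arb DRAM timing table in SMC SRAM.
- */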
-static int ci_process_firmware_header(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp;
- int ret;
-
- ret = amdgpu_ci_read_smc_sram_dword(adev,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, DpmTable),
- &tmp, pi->sram_end);
- if (ret)
- return ret;
-
- pi->dpm_table_start = tmp;
-
- ret = amdgpu_ci_read_smc_sram_dword(adev,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, SoftRegisters),
- &tmp, pi->sram_end);
- if (ret)
- return ret;
-
- pi->soft_regs_start = tmp;
-
- ret = amdgpu_ci_read_smc_sram_dword(adev,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, mcRegisterTable),
- &tmp, pi->sram_end);
- if (ret)
- return ret;
-
- pi->mc_reg_table_start = tmp;
-
- ret = amdgpu_ci_read_smc_sram_dword(adev,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, FanTable),
- &tmp, pi->sram_end);
- if (ret)
- return ret;
-
- pi->fan_table_start = tmp;
-
- ret = amdgpu_ci_read_smc_sram_dword(adev,
- SMU7_FIRMWARE_HEADER_LOCATION +
- offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
- &tmp, pi->sram_end);
- if (ret)
- return ret;
-
- pi->arb_table_start = tmp;
-
- return 0;
-}
-
-static void ci_read_clock_registers(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- pi->clock_registers.cg_spll_func_cntl =
- RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
- pi->clock_registers.cg_spll_func_cntl_2 =
- RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
- pi->clock_registers.cg_spll_func_cntl_3 =
- RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
- pi->clock_registers.cg_spll_func_cntl_4 =
- RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
- pi->clock_registers.cg_spll_spread_spectrum =
- RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
- pi->clock_registers.cg_spll_spread_spectrum_2 =
- RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
- pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
- pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
- pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
- pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
- pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
- pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
- pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
- pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
- pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
-}
-
-static void ci_init_sclk_t(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- pi->low_sclk_interrupt_t = 0;
-}
-
-static void ci_enable_thermal_protection(struct amdgpu_device *adev,
- bool enable)
-{
- u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
-
- if (enable)
- tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
- else
- tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
-}
-
-static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
-
- tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
-
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
-}
-
-#if 0
-static int ci_enter_ulp_state(struct amdgpu_device *adev)
-{
-
- WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
-
- udelay(25000);
-
- return 0;
-}
-
-static int ci_exit_ulp_state(struct amdgpu_device *adev)
-{
- int i;
-
- WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
-
- udelay(7000);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32(mmSMC_RESP_0) == 1)
- break;
- udelay(1000);
- }
-
- return 0;
-}
-#endif
-
-static int ci_notify_smc_display_change(struct amdgpu_device *adev,
- bool has_display)
-{
- PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
-
- return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
-}
-
-static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
- bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (enable) {
- if (pi->caps_sclk_ds) {
- if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
- return -EINVAL;
- } else {
- if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
- return -EINVAL;
- }
- } else {
- if (pi->caps_sclk_ds) {
- if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
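-/* Program the display gap according to the number of active CRTCs and tell
- * the SMC how long each frame runs before vblank: the pre-vblank time,
- * scaled by the SPLL reference clock, goes to CG_DISPLAY_GAP_CNTL2 and the
- * remaining frame time to the VBlankTimeout soft register.
- */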
-static void ci_program_display_gap(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
- u32 pre_vbi_time_in_us;
- u32 frame_time_in_us;
- u32 ref_clock = adev->clock.spll.reference_freq;
- u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
- u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
-
- tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
- if (adev->pm.dpm.new_active_crtc_count > 0)
- tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
- else
- tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
- WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
-
- if (refresh_rate == 0)
- refresh_rate = 60;
- if (vblank_time == 0xffffffff)
- vblank_time = 500;
- frame_time_in_us = 1000000 / refresh_rate;
- pre_vbi_time_in_us =
- frame_time_in_us - 200 - vblank_time;
- tmp = pre_vbi_time_in_us * (ref_clock / 100);
-
- WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
- ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
- ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
-
- ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
-}
-
-static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp;
-
- if (enable) {
- if (pi->caps_sclk_ss_support) {
- tmp = RREG32_SMC(ixGENERAL_PWRMGT);
- tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
- }
- } else {
- tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
- tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
- WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
-
- tmp = RREG32_SMC(ixGENERAL_PWRMGT);
- tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
- }
-}
-
-static void ci_program_sstp(struct amdgpu_device *adev)
-{
- WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
- ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
- (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
-}
-
-static void ci_enable_display_gap(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
-
- tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
- CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
- tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
- (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
-
- WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
-}
-
-static void ci_program_vc(struct amdgpu_device *adev)
-{
- u32 tmp;
-
- tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
- tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
- WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
-
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
-}
-
-static void ci_clear_vc(struct amdgpu_device *adev)
-{
- u32 tmp;
-
- tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
- tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
- WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
-
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
- WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
-}
-
-static int ci_upload_firmware(struct amdgpu_device *adev)
-{
- int i, ret;
-
- if (amdgpu_ci_is_smc_running(adev)) {
- DRM_INFO("smc is running, no need to load smc firmware\n");
- return 0;
- }
-
- for (i = 0; i < adev->usec_timeout; i++) {
- if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
- break;
- }
- WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
-
- amdgpu_ci_stop_smc_clock(adev);
- amdgpu_ci_reset_smc(adev);
-
- ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);
-
- return ret;
-}
-
-static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
- struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
- struct atom_voltage_table *voltage_table)
-{
- u32 i;
-
- if (voltage_dependency_table == NULL)
- return -EINVAL;
-
- voltage_table->mask_low = 0;
- voltage_table->phase_delay = 0;
-
- voltage_table->count = voltage_dependency_table->count;
- for (i = 0; i < voltage_table->count; i++) {
- voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
- voltage_table->entries[i].smio_low = 0;
- }
-
- return 0;
-}
-
-static int ci_construct_voltage_tables(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret;
-
- if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
- ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
- VOLTAGE_OBJ_GPIO_LUT,
- &pi->vddc_voltage_table);
- if (ret)
- return ret;
- } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
- ret = ci_get_svi2_voltage_table(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- &pi->vddc_voltage_table);
- if (ret)
- return ret;
- }
-
- if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
- ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
- &pi->vddc_voltage_table);
-
- if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
- ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
- VOLTAGE_OBJ_GPIO_LUT,
- &pi->vddci_voltage_table);
- if (ret)
- return ret;
- } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
- ret = ci_get_svi2_voltage_table(adev,
- &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- &pi->vddci_voltage_table);
- if (ret)
- return ret;
- }
-
- if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
- ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
- &pi->vddci_voltage_table);
-
- if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
- ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
- VOLTAGE_OBJ_GPIO_LUT,
- &pi->mvdd_voltage_table);
- if (ret)
- return ret;
- } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
- ret = ci_get_svi2_voltage_table(adev,
- &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
- &pi->mvdd_voltage_table);
- if (ret)
- return ret;
- }
-
- if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
- ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
- &pi->mvdd_voltage_table);
-
- return 0;
-}
-
-static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
- struct atom_voltage_table_entry *voltage_table,
- SMU7_Discrete_VoltageLevel *smc_voltage_table)
-{
- int ret;
-
- ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
- &smc_voltage_table->StdVoltageHiSidd,
- &smc_voltage_table->StdVoltageLoSidd);
-
- if (ret) {
- smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
- smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
- }
-
- smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
- smc_voltage_table->StdVoltageHiSidd =
- cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
- smc_voltage_table->StdVoltageLoSidd =
- cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
-}
-
-static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- unsigned int count;
-
- table->VddcLevelCount = pi->vddc_voltage_table.count;
- for (count = 0; count < table->VddcLevelCount; count++) {
- ci_populate_smc_voltage_table(adev,
- &pi->vddc_voltage_table.entries[count],
- &table->VddcLevel[count]);
-
- if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
- table->VddcLevel[count].Smio |=
- pi->vddc_voltage_table.entries[count].smio_low;
- else
- table->VddcLevel[count].Smio = 0;
- }
- table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
-
- return 0;
-}
-
-static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- unsigned int count;
- struct ci_power_info *pi = ci_get_pi(adev);
-
- table->VddciLevelCount = pi->vddci_voltage_table.count;
- for (count = 0; count < table->VddciLevelCount; count++) {
- ci_populate_smc_voltage_table(adev,
- &pi->vddci_voltage_table.entries[count],
- &table->VddciLevel[count]);
-
- if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
- table->VddciLevel[count].Smio |=
- pi->vddci_voltage_table.entries[count].smio_low;
- else
- table->VddciLevel[count].Smio = 0;
- }
- table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
-
- return 0;
-}
-
-static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- unsigned int count;
-
- table->MvddLevelCount = pi->mvdd_voltage_table.count;
- for (count = 0; count < table->MvddLevelCount; count++) {
- ci_populate_smc_voltage_table(adev,
- &pi->mvdd_voltage_table.entries[count],
- &table->MvddLevel[count]);
-
- if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
- table->MvddLevel[count].Smio |=
- pi->mvdd_voltage_table.entries[count].smio_low;
- else
- table->MvddLevel[count].Smio = 0;
- }
- table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
-
- return 0;
-}
-
-static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- int ret;
-
- ret = ci_populate_smc_vddc_table(adev, table);
- if (ret)
- return ret;
-
- ret = ci_populate_smc_vddci_table(adev, table);
- if (ret)
- return ret;
-
- ret = ci_populate_smc_mvdd_table(adev, table);
- if (ret)
- return ret;
-
- return 0;
-}
-
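-/*
- * Look up the MVDD level for a given memory clock from the mvdd-vs-mclk
- * dependency table.  Note that as written this never returns 0 (the final
- * return is -EINVAL even when a matching entry has filled in
- * voltage->Voltage), so the caller below, ci_populate_smc_acpi_level(),
- * always falls back to MinMvdd = 0.
- */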
-static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
- SMU7_Discrete_VoltageLevel *voltage)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 i = 0;
-
- if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
- for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
- if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
- voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
- break;
- }
- }
-
- if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
- return -EINVAL;
- }
-
- return -EINVAL;
-}
-
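-/*
- * Derive the "standard" high/low SIDD voltages for a VDDC value: match it
- * in the vddc-vs-sclk dependency table (exact match first, then the first
- * entry that is >= the value) and use the corresponding CAC leakage table
- * entry, clamped to the table's last index.  Both outputs default to
- * value * VOLTAGE_SCALE when no leakage data is available.
- */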
-static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
- struct atom_voltage_table_entry *voltage_table,
- u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
-{
- u16 v_index, idx;
- bool voltage_found = false;
- *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
- *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
-
- if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
- return -EINVAL;
-
- if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
- for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
- if (voltage_table->value ==
- adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
- voltage_found = true;
- if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
- idx = v_index;
- else
- idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
- *std_voltage_lo_sidd =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
- *std_voltage_hi_sidd =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
- break;
- }
- }
-
- if (!voltage_found) {
- for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
- if (voltage_table->value <=
- adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
- voltage_found = true;
- if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
- idx = v_index;
- else
- idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
- *std_voltage_lo_sidd =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
- *std_voltage_hi_sidd =
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
- break;
- }
- }
- }
- }
-
- return 0;
-}
-
-static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
- const struct amdgpu_phase_shedding_limits_table *limits,
- u32 sclk,
- u32 *phase_shedding)
-{
- unsigned int i;
-
- *phase_shedding = 1;
-
- for (i = 0; i < limits->count; i++) {
- if (sclk < limits->entries[i].sclk) {
- *phase_shedding = i;
- break;
- }
- }
-}
-
-static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
- const struct amdgpu_phase_shedding_limits_table *limits,
- u32 mclk,
- u32 *phase_shedding)
-{
- unsigned int i;
-
- *phase_shedding = 1;
-
- for (i = 0; i < limits->count; i++) {
- if (mclk < limits->entries[i].mclk) {
- *phase_shedding = i;
- break;
- }
- }
-}
-
-static int ci_init_arb_table_index(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp;
- int ret;
-
- ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
- &tmp, pi->sram_end);
- if (ret)
- return ret;
-
- tmp &= 0x00FFFFFF;
- tmp |= MC_CG_ARB_FREQ_F1 << 24;
-
- return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
- tmp, pi->sram_end);
-}
-
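-/*
- * Return the voltage of the first dependency-table entry whose clock is
- * >= the requested clock, or the last (highest) entry's voltage if the
- * clock exceeds every entry.
- */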
-static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
- struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
- u32 clock, u32 *voltage)
-{
- u32 i = 0;
-
- if (allowed_clock_voltage_table->count == 0)
- return -EINVAL;
-
- for (i = 0; i < allowed_clock_voltage_table->count; i++) {
- if (allowed_clock_voltage_table->entries[i].clk >= clock) {
- *voltage = allowed_clock_voltage_table->entries[i].v;
- return 0;
- }
- }
-
- *voltage = allowed_clock_voltage_table->entries[i-1].v;
-
- return 0;
-}
-
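-/*
- * Deep-sleep divider ID: the largest right shift, capped at
- * CISLAND_MAX_DEEPSLEEP_DIVIDER_ID, for which (sclk >> id) stays at or
- * above max(min_sclk_in_sr, CISLAND_MINIMUM_ENGINE_CLOCK); 0 if sclk is
- * already below that floor.
- */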
-static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
-{
- u32 i;
- u32 tmp;
- u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
-
- if (sclk < min)
- return 0;
-
- for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
- tmp = sclk >> i;
- if (tmp >= min || i == 0)
- break;
- }
-
- return (u8)i;
-}
-
-static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
-{
- return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
-}
-
-static int ci_reset_to_default(struct amdgpu_device *adev)
-{
- return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
-{
- u32 tmp;
-
- tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
-
- if (tmp == MC_CG_ARB_FREQ_F0)
- return 0;
-
- return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
-}
-
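-/*
- * Board-specific MC arbiter tweak: on devices 0x67B0/0x67B1 where bits
- * 11:8 of MC_SEQ_MISC0 read 0x3, rescale the byte in bits 23:16 of the
- * DRAM_TIMING2 value as a function of the engine clock for the two
- * memory clock windows checked below.
- */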
-static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
- const u32 engine_clock,
- const u32 memory_clock,
- u32 *dram_timimg2)
-{
- bool patch;
- u32 tmp, tmp2;
-
- tmp = RREG32(mmMC_SEQ_MISC0);
- patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
-
- if (patch &&
- ((adev->pdev->device == 0x67B0) ||
- (adev->pdev->device == 0x67B1))) {
- if ((memory_clock > 100000) && (memory_clock <= 125000)) {
- tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
- *dram_timimg2 &= ~0x00ff0000;
- *dram_timimg2 |= tmp2 << 16;
- } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
- tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
- *dram_timimg2 &= ~0x00ff0000;
- *dram_timimg2 |= tmp2 << 16;
- }
- }
-}
-
-static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
- u32 sclk,
- u32 mclk,
- SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
-{
- u32 dram_timing;
- u32 dram_timing2;
- u32 burst_time;
-
- amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
-
- dram_timing = RREG32(mmMC_ARB_DRAM_TIMING);
- dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
- burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
-
- ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
-
- arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
- arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
- arb_regs->McArbBurstTime = (u8)burst_time;
-
- return 0;
-}
-
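-/*
- * Fill the full sclk x mclk matrix of MC arbiter DRAM timings and copy
- * the resulting SMU7_Discrete_MCArbDramTimingTable into SMC SRAM at
- * arb_table_start.
- */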
-static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- SMU7_Discrete_MCArbDramTimingTable arb_regs;
- u32 i, j;
- int ret = 0;
-
- memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
-
- for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
- for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
- ret = ci_populate_memory_timing_parameters(adev,
- pi->dpm_table.sclk_table.dpm_levels[i].value,
- pi->dpm_table.mclk_table.dpm_levels[j].value,
- &arb_regs.entries[i][j]);
- if (ret)
- break;
- }
- }
-
- if (ret == 0)
- ret = amdgpu_ci_copy_bytes_to_smc(adev,
- pi->arb_table_start,
- (u8 *)&arb_regs,
- sizeof(SMU7_Discrete_MCArbDramTimingTable),
- pi->sram_end);
-
- return ret;
-}
-
-static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (pi->need_update_smu7_dpm_table == 0)
- return 0;
-
- return ci_do_program_memory_timing_parameters(adev);
-}
-
-static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_boot_state)
-{
- struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 level = 0;
-
- for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
- if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
- boot_state->performance_levels[0].sclk) {
- pi->smc_state_table.GraphicsBootLevel = level;
- break;
- }
- }
-
- for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
- if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
- boot_state->performance_levels[0].mclk) {
- pi->smc_state_table.MemoryBootLevel = level;
- break;
- }
- }
-}
-
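-/* Build a bitmask of enabled DPM levels; bit n corresponds to level n. */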
-static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
-{
- u32 i;
- u32 mask_value = 0;
-
- for (i = dpm_table->count; i > 0; i--) {
- mask_value = mask_value << 1;
- if (dpm_table->dpm_levels[i-1].enabled)
- mask_value |= 0x1;
- else
- mask_value &= 0xFFFFFFFE;
- }
-
- return mask_value;
-}
-
-static void ci_populate_smc_link_level(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_dpm_table *dpm_table = &pi->dpm_table;
- u32 i;
-
- for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
- table->LinkLevel[i].PcieGenSpeed =
- (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
- table->LinkLevel[i].PcieLaneCount =
- amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
- table->LinkLevel[i].EnabledForActivity = 1;
- table->LinkLevel[i].DownT = cpu_to_be32(5);
- table->LinkLevel[i].UpT = cpu_to_be32(30);
- }
-
- pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
- ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
-}
-
-static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- u32 count;
- struct atom_clock_dividers dividers;
- int ret = -EINVAL;
-
- table->UvdLevelCount =
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
-
- for (count = 0; count < table->UvdLevelCount; count++) {
- table->UvdLevel[count].VclkFrequency =
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
- table->UvdLevel[count].DclkFrequency =
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
- table->UvdLevel[count].MinVddc =
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
- table->UvdLevel[count].MinVddcPhases = 1;
-
- ret = amdgpu_atombios_get_clock_dividers(adev,
- COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
- table->UvdLevel[count].VclkFrequency, false, &dividers);
- if (ret)
- return ret;
-
- table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
-
- ret = amdgpu_atombios_get_clock_dividers(adev,
- COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
- table->UvdLevel[count].DclkFrequency, false, &dividers);
- if (ret)
- return ret;
-
- table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
-
- table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
- table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
- table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
- }
-
- return ret;
-}
-
-static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- u32 count;
- struct atom_clock_dividers dividers;
- int ret = -EINVAL;
-
- table->VceLevelCount =
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
-
- for (count = 0; count < table->VceLevelCount; count++) {
- table->VceLevel[count].Frequency =
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
- table->VceLevel[count].MinVoltage =
- (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
- table->VceLevel[count].MinPhases = 1;
-
- ret = amdgpu_atombios_get_clock_dividers(adev,
- COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
- table->VceLevel[count].Frequency, false, &dividers);
- if (ret)
- return ret;
-
- table->VceLevel[count].Divider = (u8)dividers.post_divider;
-
- table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
- table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
- }
-
- return ret;
-}
-
-static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- u32 count;
- struct atom_clock_dividers dividers;
- int ret = -EINVAL;
-
- table->AcpLevelCount = (u8)
- (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
-
- for (count = 0; count < table->AcpLevelCount; count++) {
- table->AcpLevel[count].Frequency =
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
- table->AcpLevel[count].MinVoltage =
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
- table->AcpLevel[count].MinPhases = 1;
-
- ret = amdgpu_atombios_get_clock_dividers(adev,
- COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
- table->AcpLevel[count].Frequency, false, &dividers);
- if (ret)
- return ret;
-
- table->AcpLevel[count].Divider = (u8)dividers.post_divider;
-
- table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
- table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
- }
-
- return ret;
-}
-
-static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- u32 count;
- struct atom_clock_dividers dividers;
- int ret = -EINVAL;
-
- table->SamuLevelCount =
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
-
- for (count = 0; count < table->SamuLevelCount; count++) {
- table->SamuLevel[count].Frequency =
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
- table->SamuLevel[count].MinVoltage =
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
- table->SamuLevel[count].MinPhases = 1;
-
- ret = amdgpu_atombios_get_clock_dividers(adev,
- COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
- table->SamuLevel[count].Frequency, false, &dividers);
- if (ret)
- return ret;
-
- table->SamuLevel[count].Divider = (u8)dividers.post_divider;
-
- table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
- table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
- }
-
- return ret;
-}
-
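-/*
- * Translate a target memory clock into MPLL register values using the
- * atombios-provided dividers: bandwidth control, feedback/fractional
- * dividers, post dividers (plus YCLK_SEL on GDDR5), optional memory
- * spread spectrum (CLKS/CLKV) and the DLL speed/power-down bits, all
- * written into the SMU7_Discrete_MemoryLevel fields.
- */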
-static int ci_calculate_mclk_params(struct amdgpu_device *adev,
- u32 memory_clock,
- SMU7_Discrete_MemoryLevel *mclk,
- bool strobe_mode,
- bool dll_state_on)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 dll_cntl = pi->clock_registers.dll_cntl;
- u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
- u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
- u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
- u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
- u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
- u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
- u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
- u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
- struct atom_mpll_param mpll_param;
- int ret;
-
- ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
- if (ret)
- return ret;
-
- mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
- mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
-
- mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
- MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
- mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
- (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
- (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
-
- mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
- mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
-
- if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
- mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
- MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
- mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
- (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
- }
-
- if (pi->caps_mclk_ss_support) {
- struct amdgpu_atom_ss ss;
- u32 freq_nom;
- u32 tmp;
- u32 reference_clock = adev->clock.mpll.reference_freq;
-
- if (mpll_param.qdr == 1)
- freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
- else
- freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
-
- tmp = (freq_nom / reference_clock);
- tmp = tmp * tmp;
- if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
- ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
- u32 clks = reference_clock * 5 / ss.rate;
- u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
-
- mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
- mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
-
- mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
- mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
- }
- }
-
- mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
- mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
-
- if (dll_state_on)
- mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
- MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
- else
- mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
- MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
-
- mclk->MclkFrequency = memory_clock;
- mclk->MpllFuncCntl = mpll_func_cntl;
- mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
- mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
- mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
- mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
- mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
- mclk->DllCntl = dll_cntl;
- mclk->MpllSs1 = mpll_ss1;
- mclk->MpllSs2 = mpll_ss2;
-
- return 0;
-}
-
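-/*
- * Fill one SMC memory level: minimum VDDC/VDDCI/MVDD for the clock,
- * optional phase shedding, activity/hysteresis defaults, stutter mode
- * (only below mclk_stutter_mode_threshold with UVD idle, display stutter
- * enabled and at most two active CRTCs), strobe/EDC handling for GDDR5,
- * then the MPLL parameters.  Multi-byte fields are converted to the
- * SMC's big-endian layout at the end.
- */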
-static int ci_populate_single_memory_level(struct amdgpu_device *adev,
- u32 memory_clock,
- SMU7_Discrete_MemoryLevel *memory_level)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret;
- bool dll_state_on;
-
- if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
- ret = ci_get_dependency_volt_by_clk(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- memory_clock, &memory_level->MinVddc);
- if (ret)
- return ret;
- }
-
- if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
- ret = ci_get_dependency_volt_by_clk(adev,
- &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- memory_clock, &memory_level->MinVddci);
- if (ret)
- return ret;
- }
-
- if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
- ret = ci_get_dependency_volt_by_clk(adev,
- &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
- memory_clock, &memory_level->MinMvdd);
- if (ret)
- return ret;
- }
-
- memory_level->MinVddcPhases = 1;
-
- if (pi->vddc_phase_shed_control)
- ci_populate_phase_value_based_on_mclk(adev,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
- memory_clock,
- &memory_level->MinVddcPhases);
-
- memory_level->EnabledForActivity = 1;
- memory_level->EnabledForThrottle = 1;
- memory_level->UpH = 0;
- memory_level->DownH = 100;
- memory_level->VoltageDownH = 0;
- memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
-
- memory_level->StutterEnable = false;
- memory_level->StrobeEnable = false;
- memory_level->EdcReadEnable = false;
- memory_level->EdcWriteEnable = false;
- memory_level->RttEnable = false;
-
- memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- if (pi->mclk_stutter_mode_threshold &&
- (memory_clock <= pi->mclk_stutter_mode_threshold) &&
- (!pi->uvd_enabled) &&
- (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
- (adev->pm.dpm.new_active_crtc_count <= 2))
- memory_level->StutterEnable = true;
-
- if (pi->mclk_strobe_mode_threshold &&
- (memory_clock <= pi->mclk_strobe_mode_threshold))
- memory_level->StrobeEnable = 1;
-
- if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
- memory_level->StrobeRatio =
- ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
- if (pi->mclk_edc_enable_threshold &&
- (memory_clock > pi->mclk_edc_enable_threshold))
- memory_level->EdcReadEnable = true;
-
- if (pi->mclk_edc_wr_enable_threshold &&
- (memory_clock > pi->mclk_edc_wr_enable_threshold))
- memory_level->EdcWriteEnable = true;
-
- if (memory_level->StrobeEnable) {
- if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
- ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
- dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
- else
- dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
- } else {
- dll_state_on = pi->dll_default_on;
- }
- } else {
- memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
- dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
- }
-
- ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
- if (ret)
- return ret;
-
- memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
- memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
- memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
- memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
-
- memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
- memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
- memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
- memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
- memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
- memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
- memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
- memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
- memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
- memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
- memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
-
- return 0;
-}
-
-static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
- SMU7_Discrete_DpmTable *table)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct atom_clock_dividers dividers;
- SMU7_Discrete_VoltageLevel voltage_level;
- u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
- u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
- u32 dll_cntl = pi->clock_registers.dll_cntl;
- u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
- int ret;
-
- table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
-
- if (pi->acpi_vddc)
- table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
- else
- table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
-
- table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
-
- table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
-
- ret = amdgpu_atombios_get_clock_dividers(adev,
- COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
- table->ACPILevel.SclkFrequency, false, &dividers);
- if (ret)
- return ret;
-
- table->ACPILevel.SclkDid = (u8)dividers.post_divider;
- table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
- table->ACPILevel.DeepSleepDivId = 0;
-
- spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
- spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
-
- spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
- spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
-
- table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
- table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
- table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
- table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
- table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
- table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
- table->ACPILevel.CcPwrDynRm = 0;
- table->ACPILevel.CcPwrDynRm1 = 0;
-
- table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
- table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
- table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
- table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
- table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
- table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
- table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
- table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
- table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
- table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
- table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
-
- table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
- table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
-
- if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
- if (pi->acpi_vddci)
- table->MemoryACPILevel.MinVddci =
- cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
- else
- table->MemoryACPILevel.MinVddci =
- cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
- }
-
- if (ci_populate_mvdd_value(adev, 0, &voltage_level))
- table->MemoryACPILevel.MinMvdd = 0;
- else
- table->MemoryACPILevel.MinMvdd =
- cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
-
- mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
- MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
- mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
- MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
-
- dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
-
- table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
- table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
- table->MemoryACPILevel.MpllAdFuncCntl =
- cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
- table->MemoryACPILevel.MpllDqFuncCntl =
- cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
- table->MemoryACPILevel.MpllFuncCntl =
- cpu_to_be32(pi->clock_registers.mpll_func_cntl);
- table->MemoryACPILevel.MpllFuncCntl_1 =
- cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
- table->MemoryACPILevel.MpllFuncCntl_2 =
- cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
- table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
- table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
-
- table->MemoryACPILevel.EnabledForThrottle = 0;
- table->MemoryACPILevel.EnabledForActivity = 0;
- table->MemoryACPILevel.UpH = 0;
- table->MemoryACPILevel.DownH = 100;
- table->MemoryACPILevel.VoltageDownH = 0;
- table->MemoryACPILevel.ActivityLevel =
- cpu_to_be16((u16)pi->mclk_activity_target);
-
- table->MemoryACPILevel.StutterEnable = false;
- table->MemoryACPILevel.StrobeEnable = false;
- table->MemoryACPILevel.EdcReadEnable = false;
- table->MemoryACPILevel.EdcWriteEnable = false;
- table->MemoryACPILevel.RttEnable = false;
-
- return 0;
-}
-
-static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ulv_parm *ulv = &pi->ulv;
-
- if (ulv->supported) {
- if (enable)
- return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
- else
- return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
- }
-
- return 0;
-}
-
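-/*
- * Fill the ULV state.  The target ULV voltage is taken from
- * pm.dpm.backbias_response_time; a value of 0 disables ULV support.
- * The offset below the lowest sclk-dependency voltage is encoded either
- * as a raw VddcOffset (GPIO/SMIO control) or as a VID-scaled
- * VddcOffsetVid (SVI2 control).
- */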
-static int ci_populate_ulv_level(struct amdgpu_device *adev,
- SMU7_Discrete_Ulv *state)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
-
- state->CcPwrDynRm = 0;
- state->CcPwrDynRm1 = 0;
-
- if (ulv_voltage == 0) {
- pi->ulv.supported = false;
- return 0;
- }
-
- if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
- if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
- state->VddcOffset = 0;
- else
- state->VddcOffset =
- adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
- } else {
- if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
- state->VddcOffsetVid = 0;
- else
- state->VddcOffsetVid = (u8)
- ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
- VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
- }
- state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
-
- state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
- state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
- state->VddcOffset = cpu_to_be16(state->VddcOffset);
-
- return 0;
-}
-
-static int ci_calculate_sclk_params(struct amdgpu_device *adev,
- u32 engine_clock,
- SMU7_Discrete_GraphicsLevel *sclk)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct atom_clock_dividers dividers;
- u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
- u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
- u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
- u32 reference_clock = adev->clock.spll.reference_freq;
- u32 reference_divider;
- u32 fbdiv;
- int ret;
-
- ret = amdgpu_atombios_get_clock_dividers(adev,
- COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
- engine_clock, false, &dividers);
- if (ret)
- return ret;
-
- reference_divider = 1 + dividers.ref_div;
- fbdiv = dividers.fb_div & 0x3FFFFFF;
-
- spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
- spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
- spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
-
- if (pi->caps_sclk_ss_support) {
- struct amdgpu_atom_ss ss;
- u32 vco_freq = engine_clock * dividers.post_div;
-
- if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
- ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
- u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
- u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
-
- cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
- cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
- cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
-
- cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
- cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
- }
- }
-
- sclk->SclkFrequency = engine_clock;
- sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
- sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
- sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
- sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
- sclk->SclkDid = (u8)dividers.post_divider;
-
- return 0;
-}
-
-static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
- u32 engine_clock,
- u16 sclk_activity_level_t,
- SMU7_Discrete_GraphicsLevel *graphic_level)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret;
-
- ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
- if (ret)
- return ret;
-
- ret = ci_get_dependency_volt_by_clk(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
- engine_clock, &graphic_level->MinVddc);
- if (ret)
- return ret;
-
- graphic_level->SclkFrequency = engine_clock;
-
- graphic_level->Flags = 0;
- graphic_level->MinVddcPhases = 1;
-
- if (pi->vddc_phase_shed_control)
- ci_populate_phase_value_based_on_sclk(adev,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
- engine_clock,
- &graphic_level->MinVddcPhases);
-
- graphic_level->ActivityLevel = sclk_activity_level_t;
-
- graphic_level->CcPwrDynRm = 0;
- graphic_level->CcPwrDynRm1 = 0;
- graphic_level->EnabledForThrottle = 1;
- graphic_level->UpH = 0;
- graphic_level->DownH = 0;
- graphic_level->VoltageDownH = 0;
- graphic_level->PowerThrottle = 0;
-
- if (pi->caps_sclk_ds)
- graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
- CISLAND_MINIMUM_ENGINE_CLOCK);
-
- graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
-
- graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
- graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
- graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
- graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
- graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
- graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
- graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
- graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
- graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
- graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
- graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
-
- return 0;
-}
-
-static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_dpm_table *dpm_table = &pi->dpm_table;
- u32 level_array_address = pi->dpm_table_start +
- offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
- u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
- SMU7_MAX_LEVELS_GRAPHICS;
- SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
- u32 i, ret;
-
- memset(levels, 0, level_array_size);
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- ret = ci_populate_single_graphic_level(adev,
- dpm_table->sclk_table.dpm_levels[i].value,
- (u16)pi->activity_target[i],
- &pi->smc_state_table.GraphicsLevel[i]);
- if (ret)
- return ret;
- if (i > 1)
- pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
- if (i == (dpm_table->sclk_table.count - 1))
- pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
- }
- pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
-
- pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
- ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
-
- ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
- (u8 *)levels, level_array_size,
- pi->sram_end);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int ci_populate_ulv_state(struct amdgpu_device *adev,
- SMU7_Discrete_Ulv *ulv_level)
-{
- return ci_populate_ulv_level(adev, ulv_level);
-}
-
-static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_dpm_table *dpm_table = &pi->dpm_table;
- u32 level_array_address = pi->dpm_table_start +
- offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
- u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
- SMU7_MAX_LEVELS_MEMORY;
- SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
- u32 i, ret;
-
- memset(levels, 0, level_array_size);
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- if (dpm_table->mclk_table.dpm_levels[i].value == 0)
- return -EINVAL;
- ret = ci_populate_single_memory_level(adev,
- dpm_table->mclk_table.dpm_levels[i].value,
- &pi->smc_state_table.MemoryLevel[i]);
- if (ret)
- return ret;
- }
-
- if ((dpm_table->mclk_table.count >= 2) &&
- ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
- pi->smc_state_table.MemoryLevel[1].MinVddc =
- pi->smc_state_table.MemoryLevel[0].MinVddc;
- pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
- pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
- }
-
- pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
-
- pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
- ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
-
- pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
- PPSMC_DISPLAY_WATERMARK_HIGH;
-
- ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
- (u8 *)levels, level_array_size,
- pi->sram_end);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
- struct ci_single_dpm_table* dpm_table,
- u32 count)
-{
- u32 i;
-
- dpm_table->count = count;
- for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
- dpm_table->dpm_levels[i].enabled = false;
-}
-
-static void ci_setup_pcie_table_entry(struct ci_single_dpm_table* dpm_table,
- u32 index, u32 pcie_gen, u32 pcie_lanes)
-{
- dpm_table->dpm_levels[index].value = pcie_gen;
- dpm_table->dpm_levels[index].param1 = pcie_lanes;
- dpm_table->dpm_levels[index].enabled = true;
-}
-
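-/*
- * Build the fixed six-entry default PCIe DPM table from the performance
- * and powersaving gen/lane ranges; whichever of the two sets is missing
- * is mirrored from the other, and Bonaire gets the powersaving maximum
- * lane count in entry 0.
- */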
-static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
- return -EINVAL;
-
- if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
- pi->pcie_gen_powersaving = pi->pcie_gen_performance;
- pi->pcie_lane_powersaving = pi->pcie_lane_performance;
- } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
- pi->pcie_gen_performance = pi->pcie_gen_powersaving;
- pi->pcie_lane_performance = pi->pcie_lane_powersaving;
- }
-
- ci_reset_single_dpm_table(adev,
- &pi->dpm_table.pcie_speed_table,
- SMU7_MAX_LEVELS_LINK);
-
- if (adev->asic_type == CHIP_BONAIRE)
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
- pi->pcie_gen_powersaving.min,
- pi->pcie_lane_powersaving.max);
- else
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
- pi->pcie_gen_powersaving.min,
- pi->pcie_lane_powersaving.min);
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
- pi->pcie_gen_performance.min,
- pi->pcie_lane_performance.min);
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
- pi->pcie_gen_powersaving.min,
- pi->pcie_lane_powersaving.max);
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
- pi->pcie_gen_performance.min,
- pi->pcie_lane_performance.max);
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
- pi->pcie_gen_powersaving.max,
- pi->pcie_lane_powersaving.max);
- ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
- pi->pcie_gen_performance.max,
- pi->pcie_lane_performance.max);
-
- pi->dpm_table.pcie_speed_table.count = 6;
-
- return 0;
-}
-
-static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
- struct amdgpu_cac_leakage_table *std_voltage_table =
- &adev->pm.dpm.dyn_state.cac_leakage_table;
- u32 i;
-
- if (allowed_sclk_vddc_table == NULL)
- return -EINVAL;
- if (allowed_sclk_vddc_table->count < 1)
- return -EINVAL;
- if (allowed_mclk_table == NULL)
- return -EINVAL;
- if (allowed_mclk_table->count < 1)
- return -EINVAL;
-
- memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
-
- ci_reset_single_dpm_table(adev,
- &pi->dpm_table.sclk_table,
- SMU7_MAX_LEVELS_GRAPHICS);
- ci_reset_single_dpm_table(adev,
- &pi->dpm_table.mclk_table,
- SMU7_MAX_LEVELS_MEMORY);
- ci_reset_single_dpm_table(adev,
- &pi->dpm_table.vddc_table,
- SMU7_MAX_LEVELS_VDDC);
- ci_reset_single_dpm_table(adev,
- &pi->dpm_table.vddci_table,
- SMU7_MAX_LEVELS_VDDCI);
- ci_reset_single_dpm_table(adev,
- &pi->dpm_table.mvdd_table,
- SMU7_MAX_LEVELS_MVDD);
-
- pi->dpm_table.sclk_table.count = 0;
- for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
- if ((i == 0) ||
- (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
- allowed_sclk_vddc_table->entries[i].clk)) {
- pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
- allowed_sclk_vddc_table->entries[i].clk;
- pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
- (i == 0) ? true : false;
- pi->dpm_table.sclk_table.count++;
- }
- }
-
- pi->dpm_table.mclk_table.count = 0;
- for (i = 0; i < allowed_mclk_table->count; i++) {
- if ((i == 0) ||
- (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
- allowed_mclk_table->entries[i].clk)) {
- pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
- allowed_mclk_table->entries[i].clk;
- pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
- (i == 0) ? true : false;
- pi->dpm_table.mclk_table.count++;
- }
- }
-
- for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
- pi->dpm_table.vddc_table.dpm_levels[i].value =
- allowed_sclk_vddc_table->entries[i].v;
- pi->dpm_table.vddc_table.dpm_levels[i].param1 =
- std_voltage_table->entries[i].leakage;
- pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
- }
- pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
-
- allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
- if (allowed_mclk_table) {
- for (i = 0; i < allowed_mclk_table->count; i++) {
- pi->dpm_table.vddci_table.dpm_levels[i].value =
- allowed_mclk_table->entries[i].v;
- pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
- }
- pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
- }
-
- allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
- if (allowed_mclk_table) {
- for (i = 0; i < allowed_mclk_table->count; i++) {
- pi->dpm_table.mvdd_table.dpm_levels[i].value =
- allowed_mclk_table->entries[i].v;
- pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
- }
- pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
- }
-
- ci_setup_default_pcie_tables(adev);
-
- /* save a copy of the default DPM table */
- memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
- sizeof(struct ci_dpm_table));
-
- return 0;
-}
-
-static int ci_find_boot_level(struct ci_single_dpm_table *table,
- u32 value, u32 *boot_level)
-{
- u32 i;
- int ret = -EINVAL;
-
- for(i = 0; i < table->count; i++) {
- if (value == table->dpm_levels[i].value) {
- *boot_level = i;
- ret = 0;
- }
- }
-
- return ret;
-}
-
-static void ci_save_default_power_profile(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct SMU7_Discrete_GraphicsLevel *levels =
- pi->smc_state_table.GraphicsLevel;
- uint32_t min_level = 0;
-
- pi->default_gfx_power_profile.activity_threshold =
- be16_to_cpu(levels[0].ActivityLevel);
- pi->default_gfx_power_profile.up_hyst = levels[0].UpH;
- pi->default_gfx_power_profile.down_hyst = levels[0].DownH;
- pi->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
-
- pi->default_compute_power_profile = pi->default_gfx_power_profile;
- pi->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
-
- /* Optimize compute power profile: Use only highest
- * 2 power levels (if more than 2 are available), Hysteresis:
- * 0ms up, 5ms down
- */
- if (pi->smc_state_table.GraphicsDpmLevelCount > 2)
- min_level = pi->smc_state_table.GraphicsDpmLevelCount - 2;
- else if (pi->smc_state_table.GraphicsDpmLevelCount == 2)
- min_level = 1;
- pi->default_compute_power_profile.min_sclk =
- be32_to_cpu(levels[min_level].SclkFrequency);
-
- pi->default_compute_power_profile.up_hyst = 0;
- pi->default_compute_power_profile.down_hyst = 5;
-
- pi->gfx_power_profile = pi->default_gfx_power_profile;
- pi->compute_power_profile = pi->default_compute_power_profile;
-}
-
-static int ci_init_smc_table(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ulv_parm *ulv = &pi->ulv;
- struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
- SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
- int ret;
-
- ret = ci_setup_default_dpm_tables(adev);
- if (ret)
- return ret;
-
- if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
- ci_populate_smc_voltage_tables(adev, table);
-
- ci_init_fps_limits(adev);
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
-
- if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
- table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
-
- if (ulv->supported) {
- ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
- if (ret)
- return ret;
- WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
- }
-
- ret = ci_populate_all_graphic_levels(adev);
- if (ret)
- return ret;
-
- ret = ci_populate_all_memory_levels(adev);
- if (ret)
- return ret;
-
- ci_populate_smc_link_level(adev, table);
-
- ret = ci_populate_smc_acpi_level(adev, table);
- if (ret)
- return ret;
-
- ret = ci_populate_smc_vce_level(adev, table);
- if (ret)
- return ret;
-
- ret = ci_populate_smc_acp_level(adev, table);
- if (ret)
- return ret;
-
- ret = ci_populate_smc_samu_level(adev, table);
- if (ret)
- return ret;
-
- ret = ci_do_program_memory_timing_parameters(adev);
- if (ret)
- return ret;
-
- ret = ci_populate_smc_uvd_level(adev, table);
- if (ret)
- return ret;
-
- table->UvdBootLevel = 0;
- table->VceBootLevel = 0;
- table->AcpBootLevel = 0;
- table->SamuBootLevel = 0;
- table->GraphicsBootLevel = 0;
- table->MemoryBootLevel = 0;
-
- ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
- pi->vbios_boot_state.sclk_bootup_value,
- (u32 *)&pi->smc_state_table.GraphicsBootLevel);
-
- ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
- pi->vbios_boot_state.mclk_bootup_value,
- (u32 *)&pi->smc_state_table.MemoryBootLevel);
-
- table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
- table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
- table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
-
- ci_populate_smc_initial_state(adev, amdgpu_boot_state);
-
- ret = ci_populate_bapm_parameters_in_dpm_table(adev);
- if (ret)
- return ret;
-
- table->UVDInterval = 1;
- table->VCEInterval = 1;
- table->ACPInterval = 1;
- table->SAMUInterval = 1;
- table->GraphicsVoltageChangeEnable = 1;
- table->GraphicsThermThrottleEnable = 1;
- table->GraphicsInterval = 1;
- table->VoltageInterval = 1;
- table->ThermalInterval = 1;
- table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
- CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
- table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
- CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
- table->MemoryVoltageChangeEnable = 1;
- table->MemoryInterval = 1;
- table->VoltageResponseTime = 0;
- table->VddcVddciDelta = 4000;
- table->PhaseResponseTime = 0;
- table->MemoryThermThrottleEnable = 1;
- table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
- table->PCIeGenInterval = 1;
- if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
- table->SVI2Enable = 1;
- else
- table->SVI2Enable = 0;
-
- table->ThermGpio = 17;
- table->SclkStepSize = 0x4000;
-
- table->SystemFlags = cpu_to_be32(table->SystemFlags);
- table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
- table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
- table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
- table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
- table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
- table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
- table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
- table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
- table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
- table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
- table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
- table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
- table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
-
- ret = amdgpu_ci_copy_bytes_to_smc(adev,
- pi->dpm_table_start +
- offsetof(SMU7_Discrete_DpmTable, SystemFlags),
- (u8 *)&table->SystemFlags,
- sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
- pi->sram_end);
- if (ret)
- return ret;
-
- ci_save_default_power_profile(adev);
-
- return 0;
-}
-
-static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
- struct ci_single_dpm_table *dpm_table,
- u32 low_limit, u32 high_limit)
-{
- u32 i;
-
- for (i = 0; i < dpm_table->count; i++) {
- if ((dpm_table->dpm_levels[i].value < low_limit) ||
- (dpm_table->dpm_levels[i].value > high_limit))
- dpm_table->dpm_levels[i].enabled = false;
- else
- dpm_table->dpm_levels[i].enabled = true;
- }
-}
-
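-/*
- * Disable PCIe DPM levels whose speed or lane count falls outside the
- * requested window, then disable later duplicates of any remaining
- * (speed, lanes) pair.
- */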
-static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
- u32 speed_low, u32 lanes_low,
- u32 speed_high, u32 lanes_high)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
- u32 i, j;
-
- for (i = 0; i < pcie_table->count; i++) {
- if ((pcie_table->dpm_levels[i].value < speed_low) ||
- (pcie_table->dpm_levels[i].param1 < lanes_low) ||
- (pcie_table->dpm_levels[i].value > speed_high) ||
- (pcie_table->dpm_levels[i].param1 > lanes_high))
- pcie_table->dpm_levels[i].enabled = false;
- else
- pcie_table->dpm_levels[i].enabled = true;
- }
-
- for (i = 0; i < pcie_table->count; i++) {
- if (pcie_table->dpm_levels[i].enabled) {
- for (j = i + 1; j < pcie_table->count; j++) {
- if (pcie_table->dpm_levels[j].enabled) {
- if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
- (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
- pcie_table->dpm_levels[j].enabled = false;
- }
- }
- }
- }
-}
-
-static int ci_trim_dpm_states(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct ci_ps *state = ci_get_ps(amdgpu_state);
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 high_limit_count;
-
- if (state->performance_level_count < 1)
- return -EINVAL;
-
- if (state->performance_level_count == 1)
- high_limit_count = 0;
- else
- high_limit_count = 1;
-
- ci_trim_single_dpm_states(adev,
- &pi->dpm_table.sclk_table,
- state->performance_levels[0].sclk,
- state->performance_levels[high_limit_count].sclk);
-
- ci_trim_single_dpm_states(adev,
- &pi->dpm_table.mclk_table,
- state->performance_levels[0].mclk,
- state->performance_levels[high_limit_count].mclk);
-
- ci_trim_pcie_dpm_states(adev,
- state->performance_levels[0].pcie_gen,
- state->performance_levels[0].pcie_lane,
- state->performance_levels[high_limit_count].pcie_gen,
- state->performance_levels[high_limit_count].pcie_lane);
-
- return 0;
-}
-
-static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
-{
- struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
- struct amdgpu_clock_voltage_dependency_table *vddc_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- u32 requested_voltage = 0;
- u32 i;
-
- if (disp_voltage_table == NULL)
- return -EINVAL;
- if (!disp_voltage_table->count)
- return -EINVAL;
-
- for (i = 0; i < disp_voltage_table->count; i++) {
- if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
- requested_voltage = disp_voltage_table->entries[i].v;
- }
-
- for (i = 0; i < vddc_table->count; i++) {
- if (requested_voltage <= vddc_table->entries[i].v) {
- requested_voltage = vddc_table->entries[i].v;
- return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_VddC_Request,
- requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
- }
- }
-
- return -EINVAL;
-}
-
-static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- PPSMC_Result result;
-
- ci_apply_disp_minimum_voltage_request(adev);
-
- if (!pi->sclk_dpm_key_disabled) {
- if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
- if (result != PPSMC_Result_OK)
- return -EINVAL;
- }
- }
-
- if (!pi->mclk_dpm_key_disabled) {
- if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
- if (result != PPSMC_Result_OK)
- return -EINVAL;
- }
- }
-
-#if 0
- if (!pi->pcie_dpm_key_disabled) {
- if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
- result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_PCIeDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
- if (result != PPSMC_Result_OK)
- return -EINVAL;
- }
- }
-#endif
-
- return 0;
-}
-
-static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ps *state = ci_get_ps(amdgpu_state);
- struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
- u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
- struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
- u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
- u32 i;
-
- pi->need_update_smu7_dpm_table = 0;
-
- for (i = 0; i < sclk_table->count; i++) {
- if (sclk == sclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= sclk_table->count) {
- pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
- } else {
-		/* XXX check display minimum clock requirements here; the
-		 * comparison below tests the constant against itself and
-		 * never fires, so it is only a placeholder for that check.
-		 */
- if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
- pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
- }
-
- for (i = 0; i < mclk_table->count; i++) {
- if (mclk == mclk_table->dpm_levels[i].value)
- break;
- }
-
- if (i >= mclk_table->count)
- pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
-
- if (adev->pm.dpm.current_active_crtc_count !=
- adev->pm.dpm.new_active_crtc_count)
- pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
-}
-
-static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ps *state = ci_get_ps(amdgpu_state);
- u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
- u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
- struct ci_dpm_table *dpm_table = &pi->dpm_table;
- int ret;
-
- if (!pi->need_update_smu7_dpm_table)
- return 0;
-
- if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
- dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
-
- if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
- dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
-
- if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
- ret = ci_populate_all_graphic_levels(adev);
- if (ret)
- return ret;
- }
-
- if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
- ret = ci_populate_all_memory_levels(adev);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct amdgpu_clock_and_voltage_limits *max_limits;
- int i;
-
- if (adev->pm.dpm.ac_power)
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- else
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
-
- if (enable) {
- pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
-
- for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
- if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
- pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
-
- if (!pi->caps_uvd_dpm)
- break;
- }
- }
-
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_UVDDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
-
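-		/* if the lowest MCLK level is enabled, disable it while UVD is active */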
- if (pi->last_mclk_dpm_enable_mask & 0x1) {
- pi->uvd_enabled = true;
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
- }
- } else {
- if (pi->uvd_enabled) {
- pi->uvd_enabled = false;
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
- }
- }
-
- return (amdgpu_ci_send_msg_to_smc(adev, enable ?
- PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct amdgpu_clock_and_voltage_limits *max_limits;
- int i;
-
- if (adev->pm.dpm.ac_power)
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- else
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
-
- if (enable) {
- pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
- for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
- if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
- pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
-
- if (!pi->caps_vce_dpm)
- break;
- }
- }
-
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_VCEDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.vce_dpm_enable_mask);
- }
-
- return (amdgpu_ci_send_msg_to_smc(adev, enable ?
- PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-#if 0
-static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct amdgpu_clock_and_voltage_limits *max_limits;
- int i;
-
- if (adev->pm.dpm.ac_power)
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- else
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
-
- if (enable) {
- pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
- for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
- if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
- pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
-
- if (!pi->caps_samu_dpm)
- break;
- }
- }
-
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SAMUDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.samu_dpm_enable_mask);
- }
- return (amdgpu_ci_send_msg_to_smc(adev, enable ?
- PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-
-static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- const struct amdgpu_clock_and_voltage_limits *max_limits;
- int i;
-
- if (adev->pm.dpm.ac_power)
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
- else
- max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
-
- if (enable) {
- pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
- for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
- if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
- pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
-
- if (!pi->caps_acp_dpm)
- break;
- }
- }
-
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_ACPDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.acp_dpm_enable_mask);
- }
-
- return (amdgpu_ci_send_msg_to_smc(adev, enable ?
- PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
- 0 : -EINVAL;
-}
-#endif
-
-static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp;
- int ret = 0;
-
- if (!gate) {
- /* turn the clocks on when decoding */
- if (pi->caps_uvd_dpm ||
- (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
- pi->smc_state_table.UvdBootLevel = 0;
- else
- pi->smc_state_table.UvdBootLevel =
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
-
- tmp = RREG32_SMC(ixDPM_TABLE_475);
- tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
- tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
- WREG32_SMC(ixDPM_TABLE_475, tmp);
- ret = ci_enable_uvd_dpm(adev, true);
- } else {
- ret = ci_enable_uvd_dpm(adev, false);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
-{
- u8 i;
- u32 min_evclk = 30000; /* ??? */
- struct amdgpu_vce_clock_voltage_dependency_table *table =
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
-
- for (i = 0; i < table->count; i++) {
- if (table->entries[i].evclk >= min_evclk)
- return i;
- }
-
- return table->count - 1;
-}
-
-static int ci_update_vce_dpm(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- struct amdgpu_ps *amdgpu_current_state)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret = 0;
- u32 tmp;
-
- if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
- if (amdgpu_new_state->evclk) {
- pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
- tmp = RREG32_SMC(ixDPM_TABLE_475);
- tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
- tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
- WREG32_SMC(ixDPM_TABLE_475, tmp);
-
- ret = ci_enable_vce_dpm(adev, true);
- } else {
- ret = ci_enable_vce_dpm(adev, false);
- if (ret)
- return ret;
- }
- }
- return ret;
-}
-
-#if 0
-static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
-{
- return ci_enable_samu_dpm(adev, gate);
-}
-
-static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp;
-
- if (!gate) {
- pi->smc_state_table.AcpBootLevel = 0;
-
- tmp = RREG32_SMC(ixDPM_TABLE_475);
- tmp &= ~AcpBootLevel_MASK;
- tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
- WREG32_SMC(ixDPM_TABLE_475, tmp);
- }
-
- return ci_enable_acp_dpm(adev, !gate);
-}
-#endif
-
-static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret;
-
- ret = ci_trim_dpm_states(adev, amdgpu_state);
- if (ret)
- return ret;
-
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
- ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
- ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
- pi->last_mclk_dpm_enable_mask =
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
- if (pi->uvd_enabled) {
- if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
- }
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
- ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
-
- return 0;
-}
-
-static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
- u32 level_mask)
-{
- u32 level = 0;
-
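-	/* callers must pass a non-zero level_mask, otherwise this loop does not terminate */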
- while ((level_mask & (1 << level)) == 0)
- level++;
-
- return level;
-}
-
-
-static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
- enum amd_dpm_forced_level level)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 tmp, levels, i;
- int ret;
-
- if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
- if ((!pi->pcie_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
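-			/* compute the index of the highest enabled PCIe level */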
- levels = 0;
- tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
- while (tmp >>= 1)
- levels++;
- if (levels) {
-				ret = ci_dpm_force_state_pcie(adev, levels);
- if (ret)
- return ret;
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
- TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
- if (tmp == levels)
- break;
- udelay(1);
- }
- }
- }
- if ((!pi->sclk_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- levels = 0;
- tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
- while (tmp >>= 1)
- levels++;
- if (levels) {
- ret = ci_dpm_force_state_sclk(adev, levels);
- if (ret)
- return ret;
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
- if (tmp == levels)
- break;
- udelay(1);
- }
- }
- }
- if ((!pi->mclk_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- levels = 0;
- tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
- while (tmp >>= 1)
- levels++;
- if (levels) {
- ret = ci_dpm_force_state_mclk(adev, levels);
- if (ret)
- return ret;
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
- if (tmp == levels)
- break;
- udelay(1);
- }
- }
- }
- } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
- if ((!pi->sclk_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
- levels = ci_get_lowest_enabled_level(adev,
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
- ret = ci_dpm_force_state_sclk(adev, levels);
- if (ret)
- return ret;
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
- if (tmp == levels)
- break;
- udelay(1);
- }
- }
- if ((!pi->mclk_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
- levels = ci_get_lowest_enabled_level(adev,
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
- ret = ci_dpm_force_state_mclk(adev, levels);
- if (ret)
- return ret;
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
- if (tmp == levels)
- break;
- udelay(1);
- }
- }
- if ((!pi->pcie_dpm_key_disabled) &&
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
- levels = ci_get_lowest_enabled_level(adev,
- pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
- ret = ci_dpm_force_state_pcie(adev, levels);
- if (ret)
- return ret;
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
- TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
- TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
- if (tmp == levels)
- break;
- udelay(1);
- }
- }
- } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
- if (!pi->pcie_dpm_key_disabled) {
- PPSMC_Result smc_result;
-
- smc_result = amdgpu_ci_send_msg_to_smc(adev,
- PPSMC_MSG_PCIeDPM_UnForceLevel);
- if (smc_result != PPSMC_Result_OK)
- return -EINVAL;
- }
- ret = ci_upload_dpm_level_enable_mask(adev);
- if (ret)
- return ret;
- }
-
- adev->pm.dpm.forced_level = level;
-
- return 0;
-}
-
-static int ci_set_mc_special_registers(struct amdgpu_device *adev,
- struct ci_mc_reg_table *table)
-{
- u8 i, j, k;
- u32 temp_reg;
-
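-	/* entries derived below are appended at index j, after the existing table->last entries */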
- for (i = 0, j = table->last; i < table->last; i++) {
- if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
-		switch (table->mc_reg_address[i].s1) {
- case mmMC_SEQ_MISC1:
- temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
- }
- j++;
- if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
-
- temp_reg = RREG32(mmMC_PMG_CMD_MRS);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
- table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
- }
- j++;
-			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
-
- if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
- table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
- table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
- }
- j++;
-				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- }
- break;
- case mmMC_SEQ_RESERVE_M:
- temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
- table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
- table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
- for (k = 0; k < table->num_entries; k++) {
- table->mc_reg_table_entry[k].mc_data[j] =
- (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
- }
- j++;
-			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- break;
- default:
- break;
- }
-
- }
-
- table->last = j;
-
- return 0;
-}
-
-static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
-{
- bool result = true;
-
-	switch (in_reg) {
- case mmMC_SEQ_RAS_TIMING:
- *out_reg = mmMC_SEQ_RAS_TIMING_LP;
- break;
- case mmMC_SEQ_DLL_STBY:
- *out_reg = mmMC_SEQ_DLL_STBY_LP;
- break;
- case mmMC_SEQ_G5PDX_CMD0:
- *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
- break;
- case mmMC_SEQ_G5PDX_CMD1:
- *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
- break;
- case mmMC_SEQ_G5PDX_CTRL:
- *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
- break;
- case mmMC_SEQ_CAS_TIMING:
- *out_reg = mmMC_SEQ_CAS_TIMING_LP;
- break;
- case mmMC_SEQ_MISC_TIMING:
- *out_reg = mmMC_SEQ_MISC_TIMING_LP;
- break;
- case mmMC_SEQ_MISC_TIMING2:
- *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
- break;
- case mmMC_SEQ_PMG_DVS_CMD:
- *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
- break;
- case mmMC_SEQ_PMG_DVS_CTL:
- *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
- break;
- case mmMC_SEQ_RD_CTL_D0:
- *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
- break;
- case mmMC_SEQ_RD_CTL_D1:
- *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
- break;
- case mmMC_SEQ_WR_CTL_D0:
- *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
- break;
- case mmMC_SEQ_WR_CTL_D1:
- *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
- break;
- case mmMC_PMG_CMD_EMRS:
- *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
- break;
- case mmMC_PMG_CMD_MRS:
- *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
- break;
- case mmMC_PMG_CMD_MRS1:
- *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
- break;
- case mmMC_SEQ_PMG_TIMING:
- *out_reg = mmMC_SEQ_PMG_TIMING_LP;
- break;
- case mmMC_PMG_CMD_MRS2:
- *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
- break;
- case mmMC_SEQ_WR_CTL_2:
- *out_reg = mmMC_SEQ_WR_CTL_2_LP;
- break;
- default:
- result = false;
- break;
- }
-
- return result;
-}
-
-static void ci_set_valid_flag(struct ci_mc_reg_table *table)
-{
- u8 i, j;
-
- for (i = 0; i < table->last; i++) {
- for (j = 1; j < table->num_entries; j++) {
- if (table->mc_reg_table_entry[j-1].mc_data[i] !=
- table->mc_reg_table_entry[j].mc_data[i]) {
- table->valid_flag |= 1 << i;
- break;
- }
- }
- }
-}
-
-static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
-{
- u32 i;
- u16 address;
-
- for (i = 0; i < table->last; i++) {
- table->mc_reg_address[i].s0 =
- ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
- address : table->mc_reg_address[i].s1;
- }
-}
-
-static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
- struct ci_mc_reg_table *ci_table)
-{
- u8 i, j;
-
- if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- if (table->num_entries > MAX_AC_TIMING_ENTRIES)
- return -EINVAL;
-
- for (i = 0; i < table->last; i++)
- ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
-
- ci_table->last = table->last;
-
- for (i = 0; i < table->num_entries; i++) {
- ci_table->mc_reg_table_entry[i].mclk_max =
- table->mc_reg_table_entry[i].mclk_max;
- for (j = 0; j < table->last; j++)
- ci_table->mc_reg_table_entry[i].mc_data[j] =
- table->mc_reg_table_entry[i].mc_data[j];
- }
- ci_table->num_entries = table->num_entries;
-
- return 0;
-}
-
-static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
- struct ci_mc_reg_table *table)
-{
- u8 i, k;
- u32 tmp;
- bool patch;
-
- tmp = RREG32(mmMC_SEQ_MISC0);
-	patch = ((tmp & 0x0000f00) == 0x300);
-
- if (patch &&
- ((adev->pdev->device == 0x67B0) ||
- (adev->pdev->device == 0x67B1))) {
- for (i = 0; i < table->last; i++) {
- if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- switch (table->mc_reg_address[i].s1) {
- case mmMC_SEQ_MISC1:
- for (k = 0; k < table->num_entries; k++) {
- if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
- (table->mc_reg_table_entry[k].mclk_max == 137500))
- table->mc_reg_table_entry[k].mc_data[i] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
- 0x00000007;
- }
- break;
- case mmMC_SEQ_WR_CTL_D0:
- for (k = 0; k < table->num_entries; k++) {
- if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
- (table->mc_reg_table_entry[k].mclk_max == 137500))
- table->mc_reg_table_entry[k].mc_data[i] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
- 0x0000D0DD;
- }
- break;
- case mmMC_SEQ_WR_CTL_D1:
- for (k = 0; k < table->num_entries; k++) {
- if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
- (table->mc_reg_table_entry[k].mclk_max == 137500))
- table->mc_reg_table_entry[k].mc_data[i] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
- 0x0000D0DD;
- }
- break;
- case mmMC_SEQ_WR_CTL_2:
- for (k = 0; k < table->num_entries; k++) {
- if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
- (table->mc_reg_table_entry[k].mclk_max == 137500))
- table->mc_reg_table_entry[k].mc_data[i] = 0;
- }
- break;
- case mmMC_SEQ_CAS_TIMING:
- for (k = 0; k < table->num_entries; k++) {
- if (table->mc_reg_table_entry[k].mclk_max == 125000)
- table->mc_reg_table_entry[k].mc_data[i] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
- 0x000C0140;
- else if (table->mc_reg_table_entry[k].mclk_max == 137500)
- table->mc_reg_table_entry[k].mc_data[i] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
- 0x000C0150;
- }
- break;
- case mmMC_SEQ_MISC_TIMING:
- for (k = 0; k < table->num_entries; k++) {
- if (table->mc_reg_table_entry[k].mclk_max == 125000)
- table->mc_reg_table_entry[k].mc_data[i] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
- 0x00000030;
- else if (table->mc_reg_table_entry[k].mclk_max == 137500)
- table->mc_reg_table_entry[k].mc_data[i] =
- (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
- 0x00000035;
- }
- break;
- default:
- break;
- }
- }
-
- WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
- tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
- tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
- WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
- WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
- }
-
- return 0;
-}
-
-static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct atom_mc_reg_table *table;
- struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
- u8 module_index = ci_get_memory_module_index(adev);
- int ret;
-
- table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
-
- WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
- WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
- WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
- WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
- WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
- WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
- WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
- WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
- WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
- WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
- WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
- WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
- WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
- WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
- WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
- WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
- WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
- WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
- WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
- WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
-
- ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
- if (ret)
- goto init_mc_done;
-
- ret = ci_copy_vbios_mc_reg_table(table, ci_table);
- if (ret)
- goto init_mc_done;
-
- ci_set_s0_mc_reg_index(ci_table);
-
- ret = ci_register_patching_mc_seq(adev, ci_table);
- if (ret)
- goto init_mc_done;
-
- ret = ci_set_mc_special_registers(adev, ci_table);
- if (ret)
- goto init_mc_done;
-
- ci_set_valid_flag(ci_table);
-
-init_mc_done:
- kfree(table);
-
- return ret;
-}
-
-static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
- SMU7_Discrete_MCRegisters *mc_reg_table)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 i, j;
-
- for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
- if (pi->mc_reg_table.valid_flag & (1 << j)) {
- if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
- return -EINVAL;
- mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
- mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
- i++;
- }
- }
-
- mc_reg_table->last = (u8)i;
-
- return 0;
-}
-
-static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
- SMU7_Discrete_MCRegisterSet *data,
- u32 num_entries, u32 valid_flag)
-{
- u32 i, j;
-
- for (i = 0, j = 0; j < num_entries; j++) {
- if (valid_flag & (1 << j)) {
- data->value[i] = cpu_to_be32(entry->mc_data[j]);
- i++;
- }
- }
-}
-
-static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
- const u32 memory_clock,
- SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 i = 0;
-
-	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
- if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
- break;
- }
-
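-	/* if memory_clock is above every mclk_max, fall back to the highest entry */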
- if ((i == pi->mc_reg_table.num_entries) && (i > 0))
- --i;
-
- ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
- mc_reg_table_data, pi->mc_reg_table.last,
- pi->mc_reg_table.valid_flag);
-}
-
-static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
- SMU7_Discrete_MCRegisters *mc_reg_table)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- u32 i;
-
- for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
- ci_convert_mc_reg_table_entry_to_smc(adev,
- pi->dpm_table.mclk_table.dpm_levels[i].value,
- &mc_reg_table->data[i]);
-}
-
-static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret;
-
- memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
-
- ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
- if (ret)
- return ret;
- ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
-
- return amdgpu_ci_copy_bytes_to_smc(adev,
- pi->mc_reg_table_start,
- (u8 *)&pi->smc_mc_reg_table,
- sizeof(SMU7_Discrete_MCRegisters),
- pi->sram_end);
-}
-
-static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
- return 0;
-
- memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
-
- ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
-
- return amdgpu_ci_copy_bytes_to_smc(adev,
- pi->mc_reg_table_start +
- offsetof(SMU7_Discrete_MCRegisters, data[0]),
- (u8 *)&pi->smc_mc_reg_table.data[0],
- sizeof(SMU7_Discrete_MCRegisterSet) *
- pi->dpm_table.mclk_table.count,
- pi->sram_end);
-}
-
-static void ci_enable_voltage_control(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
-
- tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
- WREG32_SMC(ixGENERAL_PWRMGT, tmp);
-}
-
-static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
-{
- struct ci_ps *state = ci_get_ps(amdgpu_state);
- int i;
- u16 pcie_speed, max_speed = 0;
-
- for (i = 0; i < state->performance_level_count; i++) {
- pcie_speed = state->performance_levels[i].pcie_gen;
- if (max_speed < pcie_speed)
- max_speed = pcie_speed;
- }
-
- return max_speed;
-}
-
-static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
-{
- u32 speed_cntl = 0;
-
- speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
- PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
- speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
-
- return (u16)speed_cntl;
-}
-
-static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
-{
- u32 link_width = 0;
-
- link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
- PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
- link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
-
- switch (link_width) {
- case 1:
- return 1;
- case 2:
- return 2;
- case 3:
- return 4;
- case 4:
- return 8;
- case 0:
- case 6:
- default:
- return 16;
- }
-}
-
-static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- struct amdgpu_ps *amdgpu_current_state)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- enum amdgpu_pcie_gen target_link_speed =
- ci_get_maximum_link_speed(adev, amdgpu_new_state);
- enum amdgpu_pcie_gen current_link_speed;
-
- if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
- current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
- else
- current_link_speed = pi->force_pcie_gen;
-
- pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
- pi->pspp_notify_required = false;
- if (target_link_speed > current_link_speed) {
- switch (target_link_speed) {
-#ifdef CONFIG_ACPI
- case AMDGPU_PCIE_GEN3:
- if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
- break;
- pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
- if (current_link_speed == AMDGPU_PCIE_GEN2)
- break;
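-			/* fall through and request gen2 instead */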
- case AMDGPU_PCIE_GEN2:
- if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
- break;
-#endif
- default:
- pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
- break;
- }
- } else {
- if (target_link_speed < current_link_speed)
- pi->pspp_notify_required = true;
- }
-}
-
-static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_new_state,
- struct amdgpu_ps *amdgpu_current_state)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- enum amdgpu_pcie_gen target_link_speed =
- ci_get_maximum_link_speed(adev, amdgpu_new_state);
- u8 request;
-
- if (pi->pspp_notify_required) {
- if (target_link_speed == AMDGPU_PCIE_GEN3)
- request = PCIE_PERF_REQ_PECI_GEN3;
- else if (target_link_speed == AMDGPU_PCIE_GEN2)
- request = PCIE_PERF_REQ_PECI_GEN2;
- else
- request = PCIE_PERF_REQ_PECI_GEN1;
-
- if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
- (ci_get_current_pcie_speed(adev) > 0))
- return;
-
-#ifdef CONFIG_ACPI
- amdgpu_acpi_pcie_performance_request(adev, request, false);
-#endif
- }
-}
-
-static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
- struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
- &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
- struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
- &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
-
- if (allowed_sclk_vddc_table == NULL)
- return -EINVAL;
- if (allowed_sclk_vddc_table->count < 1)
- return -EINVAL;
- if (allowed_mclk_vddc_table == NULL)
- return -EINVAL;
- if (allowed_mclk_vddc_table->count < 1)
- return -EINVAL;
- if (allowed_mclk_vddci_table == NULL)
- return -EINVAL;
- if (allowed_mclk_vddci_table->count < 1)
- return -EINVAL;
-
- pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
- pi->max_vddc_in_pp_table =
- allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
-
- pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
- pi->max_vddci_in_pp_table =
- allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
-
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
- allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
- allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
- allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
- allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
-
- return 0;
-}
-
-static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
- u32 leakage_index;
-
- for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
- if (leakage_table->leakage_id[leakage_index] == *vddc) {
- *vddc = leakage_table->actual_voltage[leakage_index];
- break;
- }
- }
-}
-
-static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
- u32 leakage_index;
-
- for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
- if (leakage_table->leakage_id[leakage_index] == *vddci) {
- *vddci = leakage_table->actual_voltage[leakage_index];
- break;
- }
- }
-}
-
-static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
- struct amdgpu_clock_voltage_dependency_table *table)
-{
- u32 i;
-
- if (table) {
- for (i = 0; i < table->count; i++)
- ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
- }
-}
-
-static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
- struct amdgpu_clock_voltage_dependency_table *table)
-{
- u32 i;
-
- if (table) {
- for (i = 0; i < table->count; i++)
- ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
- }
-}
-
-static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
- struct amdgpu_vce_clock_voltage_dependency_table *table)
-{
- u32 i;
-
- if (table) {
- for (i = 0; i < table->count; i++)
- ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
- }
-}
-
-static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
- struct amdgpu_uvd_clock_voltage_dependency_table *table)
-{
- u32 i;
-
- if (table) {
- for (i = 0; i < table->count; i++)
- ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
- }
-}
-
-static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
- struct amdgpu_phase_shedding_limits_table *table)
-{
- u32 i;
-
- if (table) {
- for (i = 0; i < table->count; i++)
- ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
- }
-}
-
-static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
- struct amdgpu_clock_and_voltage_limits *table)
-{
- if (table) {
- ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
- ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
- }
-}
-
-static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
- struct amdgpu_cac_leakage_table *table)
-{
- u32 i;
-
- if (table) {
- for (i = 0; i < table->count; i++)
- ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
- }
-}
-
-static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
-{
-
- ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
- ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
- ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
- ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
- &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
- ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
- ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
- ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
- ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
- ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
- ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
- ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
- ci_patch_cac_leakage_table_with_vddc_leakage(adev,
- &adev->pm.dpm.dyn_state.cac_leakage_table);
-
-}
-
-static void ci_update_current_ps(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct ci_ps *new_ps = ci_get_ps(rps);
- struct ci_power_info *pi = ci_get_pi(adev);
-
- pi->current_rps = *rps;
- pi->current_ps = *new_ps;
- pi->current_rps.ps_priv = &pi->current_ps;
- adev->pm.dpm.current_ps = &pi->current_rps;
-}
-
-static void ci_update_requested_ps(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct ci_ps *new_ps = ci_get_ps(rps);
- struct ci_power_info *pi = ci_get_pi(adev);
-
- pi->requested_rps = *rps;
- pi->requested_ps = *new_ps;
- pi->requested_rps.ps_priv = &pi->requested_ps;
- adev->pm.dpm.requested_ps = &pi->requested_rps;
-}
-
-static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
- struct amdgpu_ps *new_ps = &requested_ps;
-
- ci_update_requested_ps(adev, new_ps);
-
- ci_apply_state_adjust_rules(adev, &pi->requested_rps);
-
- return 0;
-}
-
-static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_ps *new_ps = &pi->requested_rps;
-
- ci_update_current_ps(adev, new_ps);
-}
-
-
-static void ci_dpm_setup_asic(struct amdgpu_device *adev)
-{
- ci_read_clock_registers(adev);
- ci_enable_acpi_power_management(adev);
- ci_init_sclk_t(adev);
-}
-
-static int ci_dpm_enable(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
- int ret;
-
- if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
- ci_enable_voltage_control(adev);
- ret = ci_construct_voltage_tables(adev);
- if (ret) {
- DRM_ERROR("ci_construct_voltage_tables failed\n");
- return ret;
- }
- }
- if (pi->caps_dynamic_ac_timing) {
- ret = ci_initialize_mc_reg_table(adev);
- if (ret)
- pi->caps_dynamic_ac_timing = false;
- }
- if (pi->dynamic_ss)
- ci_enable_spread_spectrum(adev, true);
- if (pi->thermal_protection)
- ci_enable_thermal_protection(adev, true);
- ci_program_sstp(adev);
- ci_enable_display_gap(adev);
- ci_program_vc(adev);
- ret = ci_upload_firmware(adev);
- if (ret) {
- DRM_ERROR("ci_upload_firmware failed\n");
- return ret;
- }
- ret = ci_process_firmware_header(adev);
- if (ret) {
- DRM_ERROR("ci_process_firmware_header failed\n");
- return ret;
- }
- ret = ci_initial_switch_from_arb_f0_to_f1(adev);
- if (ret) {
- DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
- return ret;
- }
- ret = ci_init_smc_table(adev);
- if (ret) {
- DRM_ERROR("ci_init_smc_table failed\n");
- return ret;
- }
- ret = ci_init_arb_table_index(adev);
- if (ret) {
- DRM_ERROR("ci_init_arb_table_index failed\n");
- return ret;
- }
- if (pi->caps_dynamic_ac_timing) {
- ret = ci_populate_initial_mc_reg_table(adev);
- if (ret) {
- DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
- return ret;
- }
- }
- ret = ci_populate_pm_base(adev);
- if (ret) {
- DRM_ERROR("ci_populate_pm_base failed\n");
- return ret;
- }
- ci_dpm_start_smc(adev);
- ci_enable_vr_hot_gpio_interrupt(adev);
- ret = ci_notify_smc_display_change(adev, false);
- if (ret) {
- DRM_ERROR("ci_notify_smc_display_change failed\n");
- return ret;
- }
- ci_enable_sclk_control(adev, true);
- ret = ci_enable_ulv(adev, true);
- if (ret) {
- DRM_ERROR("ci_enable_ulv failed\n");
- return ret;
- }
- ret = ci_enable_ds_master_switch(adev, true);
- if (ret) {
- DRM_ERROR("ci_enable_ds_master_switch failed\n");
- return ret;
- }
- ret = ci_start_dpm(adev);
- if (ret) {
- DRM_ERROR("ci_start_dpm failed\n");
- return ret;
- }
- ret = ci_enable_didt(adev, true);
- if (ret) {
- DRM_ERROR("ci_enable_didt failed\n");
- return ret;
- }
- ret = ci_enable_smc_cac(adev, true);
- if (ret) {
- DRM_ERROR("ci_enable_smc_cac failed\n");
- return ret;
- }
- ret = ci_enable_power_containment(adev, true);
- if (ret) {
- DRM_ERROR("ci_enable_power_containment failed\n");
- return ret;
- }
-
- ret = ci_power_control_set_level(adev);
- if (ret) {
- DRM_ERROR("ci_power_control_set_level failed\n");
- return ret;
- }
-
- ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
-
- ret = ci_enable_thermal_based_sclk_dpm(adev, true);
- if (ret) {
- DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
- return ret;
- }
-
- ci_thermal_start_thermal_controller(adev);
-
- ci_update_current_ps(adev, boot_ps);
-
- return 0;
-}
-
-static void ci_dpm_disable(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
-
- amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
- amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
-
- ci_dpm_powergate_uvd(adev, true);
-
- if (!amdgpu_ci_is_smc_running(adev))
- return;
-
- ci_thermal_stop_thermal_controller(adev);
-
- if (pi->thermal_protection)
- ci_enable_thermal_protection(adev, false);
- ci_enable_power_containment(adev, false);
- ci_enable_smc_cac(adev, false);
- ci_enable_didt(adev, false);
- ci_enable_spread_spectrum(adev, false);
- ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
- ci_stop_dpm(adev);
- ci_enable_ds_master_switch(adev, false);
- ci_enable_ulv(adev, false);
- ci_clear_vc(adev);
- ci_reset_to_default(adev);
- ci_dpm_stop_smc(adev);
- ci_force_switch_to_arb_f0(adev);
- ci_enable_thermal_based_sclk_dpm(adev, false);
-
- ci_update_current_ps(adev, boot_ps);
-}
-
-static int ci_dpm_set_power_state(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_ps *new_ps = &pi->requested_rps;
- struct amdgpu_ps *old_ps = &pi->current_rps;
- int ret;
-
- ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
- if (pi->pcie_performance_request)
- ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
- ret = ci_freeze_sclk_mclk_dpm(adev);
- if (ret) {
- DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
- return ret;
- }
- ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
- if (ret) {
- DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
- return ret;
- }
- ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
- if (ret) {
- DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
- return ret;
- }
-
- ret = ci_update_vce_dpm(adev, new_ps, old_ps);
- if (ret) {
- DRM_ERROR("ci_update_vce_dpm failed\n");
- return ret;
- }
-
- ret = ci_update_sclk_t(adev);
- if (ret) {
- DRM_ERROR("ci_update_sclk_t failed\n");
- return ret;
- }
- if (pi->caps_dynamic_ac_timing) {
- ret = ci_update_and_upload_mc_reg_table(adev);
- if (ret) {
- DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
- return ret;
- }
- }
- ret = ci_program_memory_timing_parameters(adev);
- if (ret) {
- DRM_ERROR("ci_program_memory_timing_parameters failed\n");
- return ret;
- }
- ret = ci_unfreeze_sclk_mclk_dpm(adev);
- if (ret) {
- DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
- return ret;
- }
- ret = ci_upload_dpm_level_enable_mask(adev);
- if (ret) {
- DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
- return ret;
- }
- if (pi->pcie_performance_request)
- ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
-
- return 0;
-}
-
-#if 0
-static void ci_dpm_reset_asic(struct amdgpu_device *adev)
-{
- ci_set_boot_state(adev);
-}
-#endif
-
-static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
-{
- ci_program_display_gap(adev);
-}
-
-union power_info {
- struct _ATOM_POWERPLAY_INFO info;
- struct _ATOM_POWERPLAY_INFO_V2 info_2;
- struct _ATOM_POWERPLAY_INFO_V3 info_3;
- struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
- struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
- struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
-};
-
-union pplib_clock_info {
- struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
- struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
- struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
- struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
- struct _ATOM_PPLIB_SI_CLOCK_INFO si;
- struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
-};
-
-union pplib_power_state {
- struct _ATOM_PPLIB_STATE v1;
- struct _ATOM_PPLIB_STATE_V2 v2;
-};
-
-static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
- struct amdgpu_ps *rps,
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
- u8 table_rev)
-{
- rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
- rps->class = le16_to_cpu(non_clock_info->usClassification);
- rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
-
- if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
- rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
- rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
- } else {
- rps->vclk = 0;
- rps->dclk = 0;
- }
-
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
- adev->pm.dpm.boot_ps = rps;
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- adev->pm.dpm.uvd_ps = rps;
-}
-
-static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
- struct amdgpu_ps *rps, int index,
- union pplib_clock_info *clock_info)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ps *ps = ci_get_ps(rps);
- struct ci_pl *pl = &ps->performance_levels[index];
-
- ps->performance_level_count = index + 1;
-
- pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
- pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
- pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
- pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
-
- pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
- pi->sys_pcie_mask,
- pi->vbios_boot_state.pcie_gen_bootup_value,
- clock_info->ci.ucPCIEGen);
- pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
- pi->vbios_boot_state.pcie_lane_bootup_value,
- le16_to_cpu(clock_info->ci.usPCIELane));
-
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
- pi->acpi_pcie_gen = pl->pcie_gen;
- }
-
- if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
- pi->ulv.supported = true;
- pi->ulv.pl = *pl;
- pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
- }
-
- /* patch up boot state */
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
- pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
- pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
- pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
- pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
- }
-
- switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
- case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
- pi->use_pcie_powersaving_levels = true;
- if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
- pi->pcie_gen_powersaving.max = pl->pcie_gen;
- if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
- pi->pcie_gen_powersaving.min = pl->pcie_gen;
- if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
- pi->pcie_lane_powersaving.max = pl->pcie_lane;
- if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
- pi->pcie_lane_powersaving.min = pl->pcie_lane;
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
- pi->use_pcie_performance_levels = true;
- if (pi->pcie_gen_performance.max < pl->pcie_gen)
- pi->pcie_gen_performance.max = pl->pcie_gen;
- if (pi->pcie_gen_performance.min > pl->pcie_gen)
- pi->pcie_gen_performance.min = pl->pcie_gen;
- if (pi->pcie_lane_performance.max < pl->pcie_lane)
- pi->pcie_lane_performance.max = pl->pcie_lane;
- if (pi->pcie_lane_performance.min > pl->pcie_lane)
- pi->pcie_lane_performance.min = pl->pcie_lane;
- break;
- default:
- break;
- }
-}
-
-static int ci_parse_power_table(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
- union pplib_power_state *power_state;
- int i, j, k, non_clock_array_index, clock_array_index;
- union pplib_clock_info *clock_info;
- struct _StateArray *state_array;
- struct _ClockInfoArray *clock_info_array;
- struct _NonClockInfoArray *non_clock_info_array;
- union power_info *power_info;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
- u8 frev, crev;
- u8 *power_state_offset;
- struct ci_ps *ps;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return -EINVAL;
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
- amdgpu_add_thermal_controller(adev);
-
- state_array = (struct _StateArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usStateArrayOffset));
- clock_info_array = (struct _ClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
- non_clock_info_array = (struct _NonClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
-
-	adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
-				  sizeof(struct amdgpu_ps), GFP_KERNEL);
- if (!adev->pm.dpm.ps)
- return -ENOMEM;
- power_state_offset = (u8 *)state_array->states;
- for (i = 0; i < state_array->ucNumEntries; i++) {
- u8 *idx;
- power_state = (union pplib_power_state *)power_state_offset;
- non_clock_array_index = power_state->v2.nonClockInfoIndex;
- non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
- &non_clock_info_array->nonClockInfo[non_clock_array_index];
- ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(adev->pm.dpm.ps);
- return -ENOMEM;
- }
- adev->pm.dpm.ps[i].ps_priv = ps;
- ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
- non_clock_info,
- non_clock_info_array->ucEntrySize);
- k = 0;
- idx = (u8 *)&power_state->v2.clockInfoIndex[0];
- for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = idx[j];
- if (clock_array_index >= clock_info_array->ucNumEntries)
- continue;
- if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
- break;
- clock_info = (union pplib_clock_info *)
- ((u8 *)&clock_info_array->clockInfo[0] +
- (clock_array_index * clock_info_array->ucEntrySize));
- ci_parse_pplib_clock_info(adev,
- &adev->pm.dpm.ps[i], k,
- clock_info);
- k++;
- }
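-		/* a v2 state entry is 2 header bytes plus one clock index byte per DPM level */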
- power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
- }
- adev->pm.dpm.num_ps = state_array->ucNumEntries;
-
- /* fill in the vce power states */
- for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
- u32 sclk, mclk;
- clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
- clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
- sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
- sclk |= clock_info->ci.ucEngineClockHigh << 16;
- mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
- mclk |= clock_info->ci.ucMemoryClockHigh << 16;
- adev->pm.dpm.vce_states[i].sclk = sclk;
- adev->pm.dpm.vce_states[i].mclk = mclk;
- }
-
- return 0;
-}
-
-static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
- struct ci_vbios_boot_state *boot_state)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
- ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
- u8 frev, crev;
- u16 data_offset;
-
- if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset)) {
- firmware_info =
- (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
- data_offset);
- boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
- boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
- boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
- boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
- boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
- boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
- boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
-
- return 0;
- }
- return -EINVAL;
-}
-
-static void ci_dpm_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->pm.dpm.num_ps; i++) {
- kfree(adev->pm.dpm.ps[i].ps_priv);
- }
- kfree(adev->pm.dpm.ps);
- kfree(adev->pm.dpm.priv);
- kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
- amdgpu_free_extended_power_table(adev);
-}
-
-/**
- * ci_dpm_init_microcode - load ucode images from disk
- *
- * @adev: amdgpu_device pointer
- *
- * Use the firmware interface to load the ucode images into
- * the driver (not loaded into hw).
- * Returns 0 on success, error on failure.
- */
-static int ci_dpm_init_microcode(struct amdgpu_device *adev)
-{
- const char *chip_name;
- char fw_name[30];
- int err;
-
- DRM_DEBUG("\n");
-
- switch (adev->asic_type) {
- case CHIP_BONAIRE:
- if ((adev->pdev->revision == 0x80) ||
- (adev->pdev->revision == 0x81) ||
- (adev->pdev->device == 0x665f))
- chip_name = "bonaire_k";
- else
- chip_name = "bonaire";
- break;
- case CHIP_HAWAII:
- if (adev->pdev->revision == 0x80)
- chip_name = "hawaii_k";
- else
- chip_name = "hawaii";
- break;
- case CHIP_KAVERI:
- case CHIP_KABINI:
- case CHIP_MULLINS:
- default: BUG();
- }
-
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
- if (err) {
- pr_err("cik_smc: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
- return err;
-}
-
-static int ci_dpm_init(struct amdgpu_device *adev)
-{
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- SMU7_Discrete_DpmTable *dpm_table;
- struct amdgpu_gpio_rec gpio;
- u16 data_offset, size;
- u8 frev, crev;
- struct ci_power_info *pi;
- int ret;
-
- pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
- if (pi == NULL)
- return -ENOMEM;
- adev->pm.dpm.priv = pi;
-
- pi->sys_pcie_mask =
- (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
- CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
-
- pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
-
- pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
- pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
- pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
- pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
-
- pi->pcie_lane_performance.max = 0;
- pi->pcie_lane_performance.min = 16;
- pi->pcie_lane_powersaving.max = 0;
- pi->pcie_lane_powersaving.min = 16;
-
- ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
- if (ret) {
- ci_dpm_fini(adev);
- return ret;
- }
-
- ret = amdgpu_get_platform_caps(adev);
- if (ret) {
- ci_dpm_fini(adev);
- return ret;
- }
-
- ret = amdgpu_parse_extended_power_table(adev);
- if (ret) {
- ci_dpm_fini(adev);
- return ret;
- }
-
- ret = ci_parse_power_table(adev);
- if (ret) {
- ci_dpm_fini(adev);
- return ret;
- }
-
- pi->dll_default_on = false;
- pi->sram_end = SMC_RAM_END;
-
- pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
- pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
- pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
- pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
- pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
- pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
- pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
- pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
-
- pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
-
- pi->sclk_dpm_key_disabled = 0;
- pi->mclk_dpm_key_disabled = 0;
- pi->pcie_dpm_key_disabled = 0;
- pi->thermal_sclk_dpm_enabled = 0;
-
- if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
- pi->caps_sclk_ds = true;
- else
- pi->caps_sclk_ds = false;
-
- pi->mclk_strobe_mode_threshold = 40000;
- pi->mclk_stutter_mode_threshold = 40000;
- pi->mclk_edc_enable_threshold = 40000;
- pi->mclk_edc_wr_enable_threshold = 40000;
-
- ci_initialize_powertune_defaults(adev);
-
- pi->caps_fps = false;
-
- pi->caps_sclk_throttle_low_notification = false;
-
- pi->caps_uvd_dpm = true;
- pi->caps_vce_dpm = true;
-
- ci_get_leakage_voltages(adev);
- ci_patch_dependency_tables_with_leakage(adev);
- ci_set_private_data_variables_based_on_pptable(adev);
-
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
- kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
- ci_dpm_fini(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
- adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
-
- adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
- adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
- adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
-
- adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
- adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
- adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
- adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
-
- if (adev->asic_type == CHIP_HAWAII) {
- pi->thermal_temp_setting.temperature_low = 94500;
- pi->thermal_temp_setting.temperature_high = 95000;
- pi->thermal_temp_setting.temperature_shutdown = 104000;
- } else {
- pi->thermal_temp_setting.temperature_low = 99500;
- pi->thermal_temp_setting.temperature_high = 100000;
- pi->thermal_temp_setting.temperature_shutdown = 104000;
- }
-
- pi->uvd_enabled = false;
-
- dpm_table = &pi->smc_state_table;
-
- gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
- if (gpio.valid) {
- dpm_table->VRHotGpio = gpio.shift;
- adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
- } else {
- dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
- adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
- }
-
- gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
- if (gpio.valid) {
- dpm_table->AcDcGpio = gpio.shift;
- adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
- } else {
- dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
- adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
- }
-
- gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
- if (gpio.valid) {
- u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
-
- switch (gpio.shift) {
- case 0:
- tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
- tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
- break;
- case 1:
- tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
- tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
- break;
- case 2:
- tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
- break;
- case 3:
- tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
- break;
- case 4:
- tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
- break;
- default:
- DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift);
- break;
- }
- WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
- }
-
- pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
- pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
- pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
- if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
- pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
- else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
- pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
- if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
- pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
- else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
- pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
- else
- adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
- }
-
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
- if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
- pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
- else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
- pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
- else
- adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
- }
-
- pi->vddc_phase_shed_control = true;
-
-#if defined(CONFIG_ACPI)
- pi->pcie_performance_request =
- amdgpu_acpi_is_pcie_performance_request_supported(adev);
-#else
- pi->pcie_performance_request = false;
-#endif
-
- if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->caps_sclk_ss_support = true;
- pi->caps_mclk_ss_support = true;
- pi->dynamic_ss = true;
- } else {
- pi->caps_sclk_ss_support = false;
- pi->caps_mclk_ss_support = false;
- pi->dynamic_ss = true;
- }
-
- if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
- pi->thermal_protection = true;
- else
- pi->thermal_protection = false;
-
- pi->caps_dynamic_ac_timing = true;
-
- pi->uvd_power_gated = true;
-
- /* make sure dc limits are valid */
- if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
- (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
- adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
-
- pi->fan_ctrl_is_in_default_mode = true;
-
- return 0;
-}
-
-static void
-ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
- struct seq_file *m)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amdgpu_ps *rps = &pi->current_rps;
- u32 sclk = ci_get_average_sclk_freq(adev);
- u32 mclk = ci_get_average_mclk_freq(adev);
- u32 activity_percent = 50;
- int ret;
-
- ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
- &activity_percent);
-
- if (ret == 0) {
- activity_percent += 0x80;
- activity_percent >>= 8;
- activity_percent = activity_percent > 100 ? 100 : activity_percent;
- }
-
- seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
- seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
- seq_printf(m, "power level avg sclk: %u mclk: %u\n",
- sclk, mclk);
- seq_printf(m, "GPU load: %u %%\n", activity_percent);
-}
-
-static void ci_dpm_print_power_state(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- struct ci_ps *ps = ci_get_ps(rps);
- struct ci_pl *pl;
- int i;
-
- amdgpu_dpm_print_class_info(rps->class, rps->class2);
- amdgpu_dpm_print_cap_info(rps->caps);
- printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
- for (i = 0; i < ps->performance_level_count; i++) {
- pl = &ps->performance_levels[i];
- printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
- i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
- }
- amdgpu_dpm_print_ps_status(adev, rps);
-}
-
-static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
- const struct ci_pl *ci_cpl2)
-{
- return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
- (ci_cpl1->sclk == ci_cpl2->sclk) &&
- (ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
- (ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
-}
-
-static int ci_check_state_equal(struct amdgpu_device *adev,
- struct amdgpu_ps *cps,
- struct amdgpu_ps *rps,
- bool *equal)
-{
- struct ci_ps *ci_cps;
- struct ci_ps *ci_rps;
- int i;
-
- if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
- return -EINVAL;
-
- ci_cps = ci_get_ps(cps);
- ci_rps = ci_get_ps(rps);
-
- if (ci_cps == NULL) {
- *equal = false;
- return 0;
- }
-
- if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
-
- *equal = false;
- return 0;
- }
-
- for (i = 0; i < ci_cps->performance_level_count; i++) {
- if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
- &(ci_rps->performance_levels[i]))) {
- *equal = false;
- return 0;
- }
- }
-
- /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
- *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
- *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
-
- return 0;
-}
-
-static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
-
- if (low)
- return requested_state->performance_levels[0].sclk;
- else
- return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
-}
-
-static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
-
- if (low)
- return requested_state->performance_levels[0].mclk;
- else
- return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
-}
-
-/* get temperature in millidegrees */
-static int ci_dpm_get_temp(struct amdgpu_device *adev)
-{
- u32 temp;
- int actual_temp = 0;
-
- temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
- CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
-
- if (temp & 0x200)
- actual_temp = 255;
- else
- actual_temp = temp & 0x1ff;
-
- actual_temp = actual_temp * 1000;
-
- return actual_temp;
-}
-
-static int ci_set_temperature_range(struct amdgpu_device *adev)
-{
- int ret;
-
- ret = ci_thermal_enable_alert(adev, false);
- if (ret)
- return ret;
- ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
- CISLANDS_TEMP_RANGE_MAX);
- if (ret)
- return ret;
- ret = ci_thermal_enable_alert(adev, true);
- if (ret)
- return ret;
- return ret;
-}
-
-static int ci_dpm_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ci_dpm_set_dpm_funcs(adev);
- ci_dpm_set_irq_funcs(adev);
-
- return 0;
-}
-
-static int ci_dpm_late_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!amdgpu_dpm)
- return 0;
-
- /* init the sysfs and debugfs files late */
- ret = amdgpu_pm_sysfs_init(adev);
- if (ret)
- return ret;
-
- ret = ci_set_temperature_range(adev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int ci_dpm_sw_init(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
- &adev->pm.dpm.thermal.irq);
- if (ret)
- return ret;
-
- ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
- &adev->pm.dpm.thermal.irq);
- if (ret)
- return ret;
-
- /* default to balanced state */
- adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
- adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
- adev->pm.default_sclk = adev->clock.default_sclk;
- adev->pm.default_mclk = adev->clock.default_mclk;
- adev->pm.current_sclk = adev->clock.default_sclk;
- adev->pm.current_mclk = adev->clock.default_mclk;
- adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
-
- ret = ci_dpm_init_microcode(adev);
- if (ret)
- return ret;
-
- if (amdgpu_dpm == 0)
- return 0;
-
- INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
- mutex_lock(&adev->pm.mutex);
- ret = ci_dpm_init(adev);
- if (ret)
- goto dpm_failed;
- adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
- if (amdgpu_dpm == 1)
- amdgpu_pm_print_power_states(adev);
- mutex_unlock(&adev->pm.mutex);
- DRM_INFO("amdgpu: dpm initialized\n");
-
- return 0;
-
-dpm_failed:
- ci_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
- DRM_ERROR("amdgpu: dpm initialization failed\n");
- return ret;
-}
-
-static int ci_dpm_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- flush_work(&adev->pm.dpm.thermal.work);
-
- mutex_lock(&adev->pm.mutex);
- amdgpu_pm_sysfs_fini(adev);
- ci_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
-
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
-
- return 0;
-}
-
-static int ci_dpm_hw_init(void *handle)
-{
- int ret;
-
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!amdgpu_dpm) {
- ret = ci_upload_firmware(adev);
- if (ret) {
- DRM_ERROR("ci_upload_firmware failed\n");
- return ret;
- }
- ci_dpm_start_smc(adev);
- return 0;
- }
-
- mutex_lock(&adev->pm.mutex);
- ci_dpm_setup_asic(adev);
- ret = ci_dpm_enable(adev);
- if (ret)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
-
- return ret;
-}
-
-static int ci_dpm_hw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- ci_dpm_disable(adev);
- mutex_unlock(&adev->pm.mutex);
- } else {
- ci_dpm_stop_smc(adev);
- }
-
- return 0;
-}
-
-static int ci_dpm_suspend(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
- amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
- amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
- AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
- adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
- adev->pm.dpm.last_state = adev->pm.dpm.state;
- adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
- adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
- mutex_unlock(&adev->pm.mutex);
- amdgpu_pm_compute_clocks(adev);
-
- }
-
- return 0;
-}
-
-static int ci_dpm_resume(void *handle)
-{
- int ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (adev->pm.dpm_enabled) {
- /* asic init will reset to the boot state */
- mutex_lock(&adev->pm.mutex);
- ci_dpm_setup_asic(adev);
- ret = ci_dpm_enable(adev);
- if (ret)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true;
- adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
- adev->pm.dpm.state = adev->pm.dpm.last_state;
- mutex_unlock(&adev->pm.mutex);
- if (adev->pm.dpm_enabled)
- amdgpu_pm_compute_clocks(adev);
- }
- return 0;
-}
-
-static bool ci_dpm_is_idle(void *handle)
-{
- /* XXX */
- return true;
-}
-
-static int ci_dpm_wait_for_idle(void *handle)
-{
- /* XXX */
- return 0;
-}
-
-static int ci_dpm_soft_reset(void *handle)
-{
- return 0;
-}
-
-static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- u32 cg_thermal_int;
-
- switch (type) {
- case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
- break;
- default:
- break;
- }
- break;
-
- case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
- cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
- cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
- WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
- break;
- default:
- break;
- }
- break;
-
- default:
- break;
- }
- return 0;
-}
-
-static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- bool queue_thermal = false;
-
- if (entry == NULL)
- return -EINVAL;
-
- switch (entry->src_id) {
- case 230: /* thermal low to high */
- DRM_DEBUG("IH: thermal low to high\n");
- adev->pm.dpm.thermal.high_to_low = false;
- queue_thermal = true;
- break;
- case 231: /* thermal high to low */
- DRM_DEBUG("IH: thermal high to low\n");
- adev->pm.dpm.thermal.high_to_low = true;
- queue_thermal = true;
- break;
- default:
- break;
- }
-
- if (queue_thermal)
- schedule_work(&adev->pm.dpm.thermal.work);
-
- return 0;
-}
-
-static int ci_dpm_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int ci_dpm_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
- enum pp_clock_type type, char *buf)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
- struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
- struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
-
- int i, now, size = 0;
- uint32_t clock, pcie_speed;
-
- switch (type) {
- case PP_SCLK:
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
- clock = RREG32(mmSMC_MSG_ARG_0);
-
- for (i = 0; i < sclk_table->count; i++) {
- if (clock > sclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < sclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, sclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_MCLK:
- amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
- clock = RREG32(mmSMC_MSG_ARG_0);
-
- for (i = 0; i < mclk_table->count; i++) {
- if (clock > mclk_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < mclk_table->count; i++)
- size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, mclk_table->dpm_levels[i].value / 100,
- (i == now) ? "*" : "");
- break;
- case PP_PCIE:
- pcie_speed = ci_get_current_pcie_speed(adev);
- for (i = 0; i < pcie_table->count; i++) {
- if (pcie_speed != pcie_table->dpm_levels[i].value)
- continue;
- break;
- }
- now = i;
-
- for (i = 0; i < pcie_table->count; i++)
- size += sprintf(buf + size, "%d: %s %s\n", i,
- (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
- (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
- (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
- (i == now) ? "*" : "");
- break;
- default:
- break;
- }
-
- return size;
-}
-
-static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
- enum pp_clock_type type, uint32_t mask)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
- AMD_DPM_FORCED_LEVEL_LOW |
- AMD_DPM_FORCED_LEVEL_HIGH))
- return -EINVAL;
-
- switch (type) {
- case PP_SCLK:
- if (!pi->sclk_dpm_key_disabled)
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
- break;
-
- case PP_MCLK:
- if (!pi->mclk_dpm_key_disabled)
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
- break;
-
- case PP_PCIE:
- {
- uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
- uint32_t level = 0;
-
- while (tmp >>= 1)
- level++;
-
- if (!pi->pcie_dpm_key_disabled)
- amdgpu_ci_send_msg_to_smc_with_parameter(adev,
- PPSMC_MSG_PCIeDPM_ForceLevel,
- level);
- break;
- }
- default:
- break;
- }
-
- return 0;
-}
-
-static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
- struct ci_single_dpm_table *golden_sclk_table =
- &(pi->golden_dpm_table.sclk_table);
- int value;
-
- value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
- 100 /
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return value;
-}
-
-static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
- struct ci_single_dpm_table *golden_sclk_table =
- &(pi->golden_dpm_table.sclk_table);
-
- if (value > 20)
- value = 20;
-
- ps->performance_levels[ps->performance_level_count - 1].sclk =
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
- value / 100 +
- golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
-
- return 0;
-}
-
-static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
- struct ci_single_dpm_table *golden_mclk_table =
- &(pi->golden_dpm_table.mclk_table);
- int value;
-
- value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
- 100 /
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return value;
-}
-
-static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
- struct ci_single_dpm_table *golden_mclk_table =
- &(pi->golden_dpm_table.mclk_table);
-
- if (value > 20)
- value = 20;
-
- ps->performance_levels[ps->performance_level_count - 1].mclk =
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
- value / 100 +
- golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
-
- return 0;
-}
-
-static int ci_dpm_get_power_profile_state(struct amdgpu_device *adev,
- struct amd_pp_profile *query)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (!pi || !query)
- return -EINVAL;
-
- if (query->type == AMD_PP_GFX_PROFILE)
- memcpy(query, &pi->gfx_power_profile,
- sizeof(struct amd_pp_profile));
- else if (query->type == AMD_PP_COMPUTE_PROFILE)
- memcpy(query, &pi->compute_power_profile,
- sizeof(struct amd_pp_profile));
- else
- return -EINVAL;
-
- return 0;
-}
-
-static int ci_populate_requested_graphic_levels(struct amdgpu_device *adev,
- struct amd_pp_profile *request)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_dpm_table *dpm_table = &(pi->dpm_table);
- struct SMU7_Discrete_GraphicsLevel *levels =
- pi->smc_state_table.GraphicsLevel;
- uint32_t array = pi->dpm_table_start +
- offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
- uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
- SMU7_MAX_LEVELS_GRAPHICS;
- uint32_t i;
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- levels[i].ActivityLevel =
- cpu_to_be16(request->activity_threshold);
- levels[i].EnabledForActivity = 1;
- levels[i].UpH = request->up_hyst;
- levels[i].DownH = request->down_hyst;
- }
-
- return amdgpu_ci_copy_bytes_to_smc(adev, array, (uint8_t *)levels,
- array_size, pi->sram_end);
-}
-
-static void ci_find_min_clock_masks(struct amdgpu_device *adev,
- uint32_t *sclk_mask, uint32_t *mclk_mask,
- uint32_t min_sclk, uint32_t min_mclk)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct ci_dpm_table *dpm_table = &(pi->dpm_table);
- uint32_t i;
-
- for (i = 0; i < dpm_table->sclk_table.count; i++) {
- if (dpm_table->sclk_table.dpm_levels[i].enabled &&
- dpm_table->sclk_table.dpm_levels[i].value >= min_sclk)
- *sclk_mask |= 1 << i;
- }
-
- for (i = 0; i < dpm_table->mclk_table.count; i++) {
- if (dpm_table->mclk_table.dpm_levels[i].enabled &&
- dpm_table->mclk_table.dpm_levels[i].value >= min_mclk)
- *mclk_mask |= 1 << i;
- }
-}
-
-static int ci_set_power_profile_state(struct amdgpu_device *adev,
- struct amd_pp_profile *request)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int tmp_result, result = 0;
- uint32_t sclk_mask = 0, mclk_mask = 0;
-
- tmp_result = ci_freeze_sclk_mclk_dpm(adev);
- if (tmp_result) {
- DRM_ERROR("Failed to freeze SCLK MCLK DPM!");
- result = tmp_result;
- }
-
- tmp_result = ci_populate_requested_graphic_levels(adev,
- request);
- if (tmp_result) {
- DRM_ERROR("Failed to populate requested graphic levels!");
- result = tmp_result;
- }
-
- tmp_result = ci_unfreeze_sclk_mclk_dpm(adev);
- if (tmp_result) {
- DRM_ERROR("Failed to unfreeze SCLK MCLK DPM!");
- result = tmp_result;
- }
-
- ci_find_min_clock_masks(adev, &sclk_mask, &mclk_mask,
- request->min_sclk, request->min_mclk);
-
- if (sclk_mask) {
- if (!pi->sclk_dpm_key_disabled)
- amdgpu_ci_send_msg_to_smc_with_parameter(
- adev,
- PPSMC_MSG_SCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.
- sclk_dpm_enable_mask &
- sclk_mask);
- }
-
- if (mclk_mask) {
- if (!pi->mclk_dpm_key_disabled)
- amdgpu_ci_send_msg_to_smc_with_parameter(
- adev,
- PPSMC_MSG_MCLKDPM_SetEnabledMask,
- pi->dpm_level_enable_mask.
- mclk_dpm_enable_mask &
- mclk_mask);
- }
-
-
- return result;
-}
-
-static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
- struct amd_pp_profile *request)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- int ret = -1;
-
- if (!pi || !request)
- return -EINVAL;
-
- if (adev->pm.dpm.forced_level !=
- AMD_DPM_FORCED_LEVEL_AUTO)
- return -EINVAL;
-
- if (request->min_sclk ||
- request->min_mclk ||
- request->activity_threshold ||
- request->up_hyst ||
- request->down_hyst) {
- if (request->type == AMD_PP_GFX_PROFILE)
- memcpy(&pi->gfx_power_profile, request,
- sizeof(struct amd_pp_profile));
- else if (request->type == AMD_PP_COMPUTE_PROFILE)
- memcpy(&pi->compute_power_profile, request,
- sizeof(struct amd_pp_profile));
- else
- return -EINVAL;
-
- if (request->type == pi->current_power_profile)
- ret = ci_set_power_profile_state(
- adev,
- request);
- } else {
- /* set power profile if it exists */
- switch (request->type) {
- case AMD_PP_GFX_PROFILE:
- ret = ci_set_power_profile_state(
- adev,
- &pi->gfx_power_profile);
- break;
- case AMD_PP_COMPUTE_PROFILE:
- ret = ci_set_power_profile_state(
- adev,
- &pi->compute_power_profile);
- break;
- default:
- return -EINVAL;
- }
- }
-
- if (!ret)
- pi->current_power_profile = request->type;
-
- return 0;
-}
-
-static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
- struct amd_pp_profile *request)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
-
- if (!pi || !request)
- return -EINVAL;
-
- if (request->type == AMD_PP_GFX_PROFILE) {
- pi->gfx_power_profile = pi->default_gfx_power_profile;
- return ci_dpm_set_power_profile_state(adev,
- &pi->gfx_power_profile);
- } else if (request->type == AMD_PP_COMPUTE_PROFILE) {
- pi->compute_power_profile =
- pi->default_compute_power_profile;
- return ci_dpm_set_power_profile_state(adev,
- &pi->compute_power_profile);
- } else
- return -EINVAL;
-}
-
-static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
- enum amd_pp_profile_type type)
-{
- struct ci_power_info *pi = ci_get_pi(adev);
- struct amd_pp_profile request = {0};
-
- if (!pi)
- return -EINVAL;
-
- if (pi->current_power_profile != type) {
- request.type = type;
- return ci_dpm_set_power_profile_state(adev, &request);
- }
-
- return 0;
-}
-
-static int ci_dpm_read_sensor(struct amdgpu_device *adev, int idx,
- void *value, int *size)
-{
- u32 activity_percent = 50;
- int ret;
-
- /* size must be at least 4 bytes for all sensors */
- if (*size < 4)
- return -EINVAL;
-
- switch (idx) {
- case AMDGPU_PP_SENSOR_GFX_SCLK:
- *((uint32_t *)value) = ci_get_average_sclk_freq(adev);
- *size = 4;
- return 0;
- case AMDGPU_PP_SENSOR_GFX_MCLK:
- *((uint32_t *)value) = ci_get_average_mclk_freq(adev);
- *size = 4;
- return 0;
- case AMDGPU_PP_SENSOR_GPU_TEMP:
- *((uint32_t *)value) = ci_dpm_get_temp(adev);
- *size = 4;
- return 0;
- case AMDGPU_PP_SENSOR_GPU_LOAD:
- ret = ci_read_smc_soft_register(adev,
- offsetof(SMU7_SoftRegisters,
- AverageGraphicsA),
- &activity_percent);
- if (ret == 0) {
- activity_percent += 0x80;
- activity_percent >>= 8;
- activity_percent =
- activity_percent > 100 ? 100 : activity_percent;
- }
- *((uint32_t *)value) = activity_percent;
- *size = 4;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-const struct amd_ip_funcs ci_dpm_ip_funcs = {
- .name = "ci_dpm",
- .early_init = ci_dpm_early_init,
- .late_init = ci_dpm_late_init,
- .sw_init = ci_dpm_sw_init,
- .sw_fini = ci_dpm_sw_fini,
- .hw_init = ci_dpm_hw_init,
- .hw_fini = ci_dpm_hw_fini,
- .suspend = ci_dpm_suspend,
- .resume = ci_dpm_resume,
- .is_idle = ci_dpm_is_idle,
- .wait_for_idle = ci_dpm_wait_for_idle,
- .soft_reset = ci_dpm_soft_reset,
- .set_clockgating_state = ci_dpm_set_clockgating_state,
- .set_powergating_state = ci_dpm_set_powergating_state,
-};
-
-static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
- .get_temperature = &ci_dpm_get_temp,
- .pre_set_power_state = &ci_dpm_pre_set_power_state,
- .set_power_state = &ci_dpm_set_power_state,
- .post_set_power_state = &ci_dpm_post_set_power_state,
- .display_configuration_changed = &ci_dpm_display_configuration_changed,
- .get_sclk = &ci_dpm_get_sclk,
- .get_mclk = &ci_dpm_get_mclk,
- .print_power_state = &ci_dpm_print_power_state,
- .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
- .force_performance_level = &ci_dpm_force_performance_level,
- .vblank_too_short = &ci_dpm_vblank_too_short,
- .powergate_uvd = &ci_dpm_powergate_uvd,
- .set_fan_control_mode = &ci_dpm_set_fan_control_mode,
- .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
- .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
- .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
- .print_clock_levels = ci_dpm_print_clock_levels,
- .force_clock_level = ci_dpm_force_clock_level,
- .get_sclk_od = ci_dpm_get_sclk_od,
- .set_sclk_od = ci_dpm_set_sclk_od,
- .get_mclk_od = ci_dpm_get_mclk_od,
- .set_mclk_od = ci_dpm_set_mclk_od,
- .check_state_equal = ci_check_state_equal,
- .get_vce_clock_state = amdgpu_get_vce_clock_state,
- .get_power_profile_state = ci_dpm_get_power_profile_state,
- .set_power_profile_state = ci_dpm_set_power_profile_state,
- .reset_power_profile_state = ci_dpm_reset_power_profile_state,
- .switch_power_profile = ci_dpm_switch_power_profile,
- .read_sensor = ci_dpm_read_sensor,
-};
-
-static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
-{
- if (adev->pm.funcs == NULL)
- adev->pm.funcs = &ci_dpm_funcs;
-}
-
-static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
- .set = ci_dpm_set_interrupt_state,
- .process = ci_dpm_process_interrupt,
-};
-
-static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
-{
- adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
- adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
deleted file mode 100644
index 84cbc9c45f4d..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * Copyright 2013 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef __CI_DPM_H__
-#define __CI_DPM_H__
-
-#include "amdgpu_atombios.h"
-#include "ppsmc.h"
-
-#define SMU__NUM_SCLK_DPM_STATE 8
-#define SMU__NUM_MCLK_DPM_LEVELS 6
-#define SMU__NUM_LCLK_DPM_LEVELS 8
-#define SMU__NUM_PCIE_DPM_LEVELS 8
-#include "smu7_discrete.h"
-
-#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
-
-#define CISLANDS_UNUSED_GPIO_PIN 0x7F
-
-struct ci_pl {
- u32 mclk;
- u32 sclk;
- enum amdgpu_pcie_gen pcie_gen;
- u16 pcie_lane;
-};
-
-struct ci_ps {
- u16 performance_level_count;
- bool dc_compatible;
- u32 sclk_t;
- struct ci_pl performance_levels[CISLANDS_MAX_HARDWARE_POWERLEVELS];
-};
-
-struct ci_dpm_level {
- bool enabled;
- u32 value;
- u32 param1;
-};
-
-#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define MAX_REGULAR_DPM_NUMBER 8
-#define CISLAND_MINIMUM_ENGINE_CLOCK 800
-
-struct ci_single_dpm_table {
- u32 count;
- struct ci_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
-};
-
-struct ci_dpm_table {
- struct ci_single_dpm_table sclk_table;
- struct ci_single_dpm_table mclk_table;
- struct ci_single_dpm_table pcie_speed_table;
- struct ci_single_dpm_table vddc_table;
- struct ci_single_dpm_table vddci_table;
- struct ci_single_dpm_table mvdd_table;
-};
-
-struct ci_mc_reg_entry {
- u32 mclk_max;
- u32 mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct ci_mc_reg_table {
- u8 last;
- u8 num_entries;
- u16 valid_flag;
- struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
- SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
-};
-
-struct ci_ulv_parm
-{
- bool supported;
- u32 cg_ulv_parameter;
- u32 volt_change_delay;
- struct ci_pl pl;
-};
-
-#define CISLANDS_MAX_LEAKAGE_COUNT 8
-
-struct ci_leakage_voltage {
- u16 count;
- u16 leakage_id[CISLANDS_MAX_LEAKAGE_COUNT];
- u16 actual_voltage[CISLANDS_MAX_LEAKAGE_COUNT];
-};
-
-struct ci_dpm_level_enable_mask {
- u32 uvd_dpm_enable_mask;
- u32 vce_dpm_enable_mask;
- u32 acp_dpm_enable_mask;
- u32 samu_dpm_enable_mask;
- u32 sclk_dpm_enable_mask;
- u32 mclk_dpm_enable_mask;
- u32 pcie_dpm_enable_mask;
-};
-
-struct ci_vbios_boot_state
-{
- u16 mvdd_bootup_value;
- u16 vddc_bootup_value;
- u16 vddci_bootup_value;
- u32 sclk_bootup_value;
- u32 mclk_bootup_value;
- u16 pcie_gen_bootup_value;
- u16 pcie_lane_bootup_value;
-};
-
-struct ci_clock_registers {
- u32 cg_spll_func_cntl;
- u32 cg_spll_func_cntl_2;
- u32 cg_spll_func_cntl_3;
- u32 cg_spll_func_cntl_4;
- u32 cg_spll_spread_spectrum;
- u32 cg_spll_spread_spectrum_2;
- u32 dll_cntl;
- u32 mclk_pwrmgt_cntl;
- u32 mpll_ad_func_cntl;
- u32 mpll_dq_func_cntl;
- u32 mpll_func_cntl;
- u32 mpll_func_cntl_1;
- u32 mpll_func_cntl_2;
- u32 mpll_ss1;
- u32 mpll_ss2;
-};
-
-struct ci_thermal_temperature_setting {
- s32 temperature_low;
- s32 temperature_high;
- s32 temperature_shutdown;
-};
-
-struct ci_pcie_perf_range {
- u16 max;
- u16 min;
-};
-
-enum ci_pt_config_reg_type {
- CISLANDS_CONFIGREG_MMR = 0,
- CISLANDS_CONFIGREG_SMC_IND,
- CISLANDS_CONFIGREG_DIDT_IND,
- CISLANDS_CONFIGREG_CACHE,
- CISLANDS_CONFIGREG_MAX
-};
-
-#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001
-#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
-#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
-
-struct ci_pt_config_reg {
- u32 offset;
- u32 mask;
- u32 shift;
- u32 value;
- enum ci_pt_config_reg_type type;
-};
-
-struct ci_pt_defaults {
- u8 svi_load_line_en;
- u8 svi_load_line_vddc;
- u8 tdc_vddc_throttle_release_limit_perc;
- u8 tdc_mawt;
- u8 tdc_waterfall_ctl;
- u8 dte_ambient_temp_base;
- u32 display_cac;
- u32 bapm_temp_gradient;
- u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
- u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
-};
-
-#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
-#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
-#define DPMTABLE_UPDATE_SCLK 0x00000004
-#define DPMTABLE_UPDATE_MCLK 0x00000008
-
-struct ci_power_info {
- struct ci_dpm_table dpm_table;
- struct ci_dpm_table golden_dpm_table;
- u32 voltage_control;
- u32 mvdd_control;
- u32 vddci_control;
- u32 active_auto_throttle_sources;
- struct ci_clock_registers clock_registers;
- u16 acpi_vddc;
- u16 acpi_vddci;
- enum amdgpu_pcie_gen force_pcie_gen;
- enum amdgpu_pcie_gen acpi_pcie_gen;
- struct ci_leakage_voltage vddc_leakage;
- struct ci_leakage_voltage vddci_leakage;
- u16 max_vddc_in_pp_table;
- u16 min_vddc_in_pp_table;
- u16 max_vddci_in_pp_table;
- u16 min_vddci_in_pp_table;
- u32 mclk_strobe_mode_threshold;
- u32 mclk_stutter_mode_threshold;
- u32 mclk_edc_enable_threshold;
- u32 mclk_edc_wr_enable_threshold;
- struct ci_vbios_boot_state vbios_boot_state;
- /* smc offsets */
- u32 sram_end;
- u32 dpm_table_start;
- u32 soft_regs_start;
- u32 mc_reg_table_start;
- u32 fan_table_start;
- u32 arb_table_start;
- /* smc tables */
- SMU7_Discrete_DpmTable smc_state_table;
- SMU7_Discrete_MCRegisters smc_mc_reg_table;
- SMU7_Discrete_PmFuses smc_powertune_table;
- /* other stuff */
- struct ci_mc_reg_table mc_reg_table;
- struct atom_voltage_table vddc_voltage_table;
- struct atom_voltage_table vddci_voltage_table;
- struct atom_voltage_table mvdd_voltage_table;
- struct ci_ulv_parm ulv;
- u32 power_containment_features;
- const struct ci_pt_defaults *powertune_defaults;
- u32 dte_tj_offset;
- bool vddc_phase_shed_control;
- struct ci_thermal_temperature_setting thermal_temp_setting;
- struct ci_dpm_level_enable_mask dpm_level_enable_mask;
- u32 need_update_smu7_dpm_table;
- u32 sclk_dpm_key_disabled;
- u32 mclk_dpm_key_disabled;
- u32 pcie_dpm_key_disabled;
- u32 thermal_sclk_dpm_enabled;
- struct ci_pcie_perf_range pcie_gen_performance;
- struct ci_pcie_perf_range pcie_lane_performance;
- struct ci_pcie_perf_range pcie_gen_powersaving;
- struct ci_pcie_perf_range pcie_lane_powersaving;
- u32 activity_target[SMU7_MAX_LEVELS_GRAPHICS];
- u32 mclk_activity_target;
- u32 low_sclk_interrupt_t;
- u32 last_mclk_dpm_enable_mask;
- u32 sys_pcie_mask;
- /* caps */
- bool caps_power_containment;
- bool caps_cac;
- bool caps_sq_ramping;
- bool caps_db_ramping;
- bool caps_td_ramping;
- bool caps_tcp_ramping;
- bool caps_fps;
- bool caps_sclk_ds;
- bool caps_sclk_ss_support;
- bool caps_mclk_ss_support;
- bool caps_uvd_dpm;
- bool caps_vce_dpm;
- bool caps_samu_dpm;
- bool caps_acp_dpm;
- bool caps_automatic_dc_transition;
- bool caps_sclk_throttle_low_notification;
- bool caps_dynamic_ac_timing;
- bool caps_od_fuzzy_fan_control_support;
- /* flags */
- bool thermal_protection;
- bool pcie_performance_request;
- bool dynamic_ss;
- bool dll_default_on;
- bool cac_enabled;
- bool uvd_enabled;
- bool battery_state;
- bool pspp_notify_required;
- bool enable_bapm_feature;
- bool enable_tdc_limit_feature;
- bool enable_pkg_pwr_tracking_feature;
- bool use_pcie_performance_levels;
- bool use_pcie_powersaving_levels;
- bool uvd_power_gated;
- /* driver states */
- struct amdgpu_ps current_rps;
- struct ci_ps current_ps;
- struct amdgpu_ps requested_rps;
- struct ci_ps requested_ps;
- /* fan control */
- bool fan_ctrl_is_in_default_mode;
- bool fan_is_controlled_by_smc;
- u32 t_min;
- u32 fan_ctrl_default_mode;
-
- /* power profile */
- struct amd_pp_profile gfx_power_profile;
- struct amd_pp_profile compute_power_profile;
- struct amd_pp_profile default_gfx_power_profile;
- struct amd_pp_profile default_compute_power_profile;
- enum amd_pp_profile_type current_power_profile;
-};
-
-#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
-#define CISLANDS_VOLTAGE_CONTROL_BY_GPIO 0x1
-#define CISLANDS_VOLTAGE_CONTROL_BY_SVID2 0x2
-
-#define CISLANDS_Q88_FORMAT_CONVERSION_UNIT 256
-
-#define CISLANDS_VRC_DFLT0 0x3FFFC000
-#define CISLANDS_VRC_DFLT1 0x000400
-#define CISLANDS_VRC_DFLT2 0xC00080
-#define CISLANDS_VRC_DFLT3 0xC00200
-#define CISLANDS_VRC_DFLT4 0xC01680
-#define CISLANDS_VRC_DFLT5 0xC00033
-#define CISLANDS_VRC_DFLT6 0xC00033
-#define CISLANDS_VRC_DFLT7 0x3FFFC000
-
-#define CISLANDS_CGULVPARAMETER_DFLT 0x00040035
-#define CISLAND_TARGETACTIVITY_DFLT 30
-#define CISLAND_MCLK_TARGETACTIVITY_DFLT 10
-
-#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
-#define PCIE_PERF_REQ_FORCE_LOWPOWER 1
-#define PCIE_PERF_REQ_PECI_GEN1 2
-#define PCIE_PERF_REQ_PECI_GEN2 3
-#define PCIE_PERF_REQ_PECI_GEN3 4
-
-#define CISLANDS_SSTU_DFLT 0
-#define CISLANDS_SST_DFLT 0x00C8
-
-/* XXX are these ok? */
-#define CISLANDS_TEMP_RANGE_MIN (90 * 1000)
-#define CISLANDS_TEMP_RANGE_MAX (120 * 1000)
-
-int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev,
- u32 smc_start_address,
- const u8 *src, u32 byte_count, u32 limit);
-void amdgpu_ci_start_smc(struct amdgpu_device *adev);
-void amdgpu_ci_reset_smc(struct amdgpu_device *adev);
-int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev);
-void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev);
-void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev);
-bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev);
-PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg);
-PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev);
-int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit);
-int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev,
- u32 smc_address, u32 *value, u32 limit);
-int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev,
- u32 smc_address, u32 value, u32 limit);
-
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_smc.c b/drivers/gpu/drm/amd/amdgpu/ci_smc.c
deleted file mode 100644
index 7eb9069db8e3..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/ci_smc.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * Copyright 2011 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Alex Deucher
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "amdgpu.h"
-#include "cikd.h"
-#include "ppsmc.h"
-#include "amdgpu_ucode.h"
-#include "ci_dpm.h"
-
-#include "smu/smu_7_0_1_d.h"
-#include "smu/smu_7_0_1_sh_mask.h"
-
-static int ci_set_smc_sram_address(struct amdgpu_device *adev,
- u32 smc_address, u32 limit)
-{
- if (smc_address & 3)
- return -EINVAL;
- if ((smc_address + 3) > limit)
- return -EINVAL;
-
- WREG32(mmSMC_IND_INDEX_0, smc_address);
- WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
-
- return 0;
-}
-
-int amdgpu_ci_copy_bytes_to_smc(struct amdgpu_device *adev,
- u32 smc_start_address,
- const u8 *src, u32 byte_count, u32 limit)
-{
- unsigned long flags;
- u32 data, original_data;
- u32 addr;
- u32 extra_shift;
- int ret = 0;
-
- if (smc_start_address & 3)
- return -EINVAL;
- if ((smc_start_address + byte_count) > limit)
- return -EINVAL;
-
- addr = smc_start_address;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- while (byte_count >= 4) {
- /* SMC address space is BE */
- data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
-
- ret = ci_set_smc_sram_address(adev, addr, limit);
- if (ret)
- goto done;
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- src += 4;
- byte_count -= 4;
- addr += 4;
- }
-
- /* RMW for the final bytes */
- if (byte_count > 0) {
- data = 0;
-
- ret = ci_set_smc_sram_address(adev, addr, limit);
- if (ret)
- goto done;
-
- original_data = RREG32(mmSMC_IND_DATA_0);
-
- extra_shift = 8 * (4 - byte_count);
-
- while (byte_count > 0) {
- data = (data << 8) + *src++;
- byte_count--;
- }
-
- data <<= extra_shift;
-
- data |= (original_data & ~((~0UL) << extra_shift));
-
- ret = ci_set_smc_sram_address(adev, addr, limit);
- if (ret)
- goto done;
-
- WREG32(mmSMC_IND_DATA_0, data);
- }
-
-done:
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return ret;
-}
-
-void amdgpu_ci_start_smc(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-
- tmp &= ~SMC_SYSCON_RESET_CNTL__rst_reg_MASK;
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp);
-}
-
-void amdgpu_ci_reset_smc(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixSMC_SYSCON_RESET_CNTL);
-
- tmp |= SMC_SYSCON_RESET_CNTL__rst_reg_MASK;
- WREG32_SMC(ixSMC_SYSCON_RESET_CNTL, tmp);
-}
-
-int amdgpu_ci_program_jump_on_start(struct amdgpu_device *adev)
-{
- static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
-
- return amdgpu_ci_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1);
-}
-
-void amdgpu_ci_stop_smc_clock(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-
- tmp |= SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK;
-
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp);
-}
-
-void amdgpu_ci_start_smc_clock(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
-
- tmp &= ~SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK;
-
- WREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0, tmp);
-}
-
-bool amdgpu_ci_is_smc_running(struct amdgpu_device *adev)
-{
- u32 clk = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- u32 pc_c = RREG32_SMC(ixSMC_PC_C);
-
- if (!(clk & SMC_SYSCON_CLOCK_CNTL_0__ck_disable_MASK) && (0x20100 <= pc_c))
- return true;
-
- return false;
-}
-
-PPSMC_Result amdgpu_ci_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
-{
- u32 tmp;
- int i;
-
- if (!amdgpu_ci_is_smc_running(adev))
- return PPSMC_Result_Failed;
-
- WREG32(mmSMC_MESSAGE_0, msg);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(mmSMC_RESP_0);
- if (tmp != 0)
- break;
- udelay(1);
- }
- tmp = RREG32(mmSMC_RESP_0);
-
- return (PPSMC_Result)tmp;
-}
-
-PPSMC_Result amdgpu_ci_wait_for_smc_inactive(struct amdgpu_device *adev)
-{
- u32 tmp;
- int i;
-
- if (!amdgpu_ci_is_smc_running(adev))
- return PPSMC_Result_OK;
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
- if ((tmp & SMC_SYSCON_CLOCK_CNTL_0__cken_MASK) == 0)
- break;
- udelay(1);
- }
-
- return PPSMC_Result_OK;
-}
-
-int amdgpu_ci_load_smc_ucode(struct amdgpu_device *adev, u32 limit)
-{
- const struct smc_firmware_header_v1_0 *hdr;
- unsigned long flags;
- u32 ucode_start_address;
- u32 ucode_size;
- const u8 *src;
- u32 data;
-
- if (!adev->pm.fw)
- return -EINVAL;
-
- hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
- amdgpu_ucode_print_smc_hdr(&hdr->header);
-
- adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
- ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
- ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- src = (const u8 *)
- (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
-
- if (ucode_size & 3)
- return -EINVAL;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_0, ucode_start_address);
- WREG32_P(mmSMC_IND_ACCESS_CNTL, SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK,
- ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
- while (ucode_size >= 4) {
- /* SMC address space is BE */
- data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
-
- WREG32(mmSMC_IND_DATA_0, data);
-
- src += 4;
- ucode_size -= 4;
- }
- WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return 0;
-}
-
-int amdgpu_ci_read_smc_sram_dword(struct amdgpu_device *adev,
- u32 smc_address, u32 *value, u32 limit)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- ret = ci_set_smc_sram_address(adev, smc_address, limit);
- if (ret == 0)
- *value = RREG32(mmSMC_IND_DATA_0);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return ret;
-}
-
-int amdgpu_ci_write_smc_sram_dword(struct amdgpu_device *adev,
- u32 smc_address, u32 value, u32 limit)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&adev->smc_idx_lock, flags);
- ret = ci_set_smc_sram_address(adev, smc_address, limit);
- if (ret == 0)
- WREG32(mmSMC_IND_DATA_0, value);
- spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 9d33e5641419..9cd63b4177bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -24,7 +24,10 @@
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include "drmP.h"
+#include <linux/pci.h>
+
+#include <drm/amdgpu_drm.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -65,9 +68,83 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "amdgpu_dm.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_powerplay.h"
-#include "dce_virtual.h"
+#include "amdgpu_vkms.h"
+
+static const struct amdgpu_video_codec_info cik_video_codecs_encode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs cik_video_codecs_encode =
+{
+ .codec_count = ARRAY_SIZE(cik_video_codecs_encode_array),
+ .codec_array = cik_video_codecs_encode_array,
+};
+
+static const struct amdgpu_video_codec_info cik_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 41,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 4,
+ },
+};
+
+static const struct amdgpu_video_codecs cik_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(cik_video_codecs_decode_array),
+ .codec_array = cik_video_codecs_decode_array,
+};
+
+static int cik_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ const struct amdgpu_video_codecs **codecs)
+{
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ if (encode)
+ *codecs = &cik_video_codecs_encode;
+ else
+ *codecs = &cik_video_codecs_decode;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
/*
* Indirect registers accessor
@@ -754,74 +831,74 @@ static void cik_init_golden_registers(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_BONAIRE:
- amdgpu_program_register_sequence(adev,
- bonaire_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- bonaire_golden_registers,
- (const u32)ARRAY_SIZE(bonaire_golden_registers));
- amdgpu_program_register_sequence(adev,
- bonaire_golden_common_registers,
- (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- bonaire_golden_spm_registers,
- (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_mgcg_cgcg_init,
+ ARRAY_SIZE(bonaire_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_golden_registers,
+ ARRAY_SIZE(bonaire_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_golden_common_registers,
+ ARRAY_SIZE(bonaire_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ bonaire_golden_spm_registers,
+ ARRAY_SIZE(bonaire_golden_spm_registers));
break;
case CHIP_KABINI:
- amdgpu_program_register_sequence(adev,
- kalindi_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_registers,
- (const u32)ARRAY_SIZE(kalindi_golden_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_common_registers,
- (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_spm_registers,
- (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_mgcg_cgcg_init,
+ ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_registers,
+ ARRAY_SIZE(kalindi_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_common_registers,
+ ARRAY_SIZE(kalindi_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_spm_registers,
+ ARRAY_SIZE(kalindi_golden_spm_registers));
break;
case CHIP_MULLINS:
- amdgpu_program_register_sequence(adev,
- kalindi_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- godavari_golden_registers,
- (const u32)ARRAY_SIZE(godavari_golden_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_common_registers,
- (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- kalindi_golden_spm_registers,
- (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_mgcg_cgcg_init,
+ ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ godavari_golden_registers,
+ ARRAY_SIZE(godavari_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_common_registers,
+ ARRAY_SIZE(kalindi_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ kalindi_golden_spm_registers,
+ ARRAY_SIZE(kalindi_golden_spm_registers));
break;
case CHIP_KAVERI:
- amdgpu_program_register_sequence(adev,
- spectre_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- spectre_golden_registers,
- (const u32)ARRAY_SIZE(spectre_golden_registers));
- amdgpu_program_register_sequence(adev,
- spectre_golden_common_registers,
- (const u32)ARRAY_SIZE(spectre_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- spectre_golden_spm_registers,
- (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_mgcg_cgcg_init,
+ ARRAY_SIZE(spectre_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_golden_registers,
+ ARRAY_SIZE(spectre_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_golden_common_registers,
+ ARRAY_SIZE(spectre_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ spectre_golden_spm_registers,
+ ARRAY_SIZE(spectre_golden_spm_registers));
break;
case CHIP_HAWAII:
- amdgpu_program_register_sequence(adev,
- hawaii_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- hawaii_golden_registers,
- (const u32)ARRAY_SIZE(hawaii_golden_registers));
- amdgpu_program_register_sequence(adev,
- hawaii_golden_common_registers,
- (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
- amdgpu_program_register_sequence(adev,
- hawaii_golden_spm_registers,
- (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_mgcg_cgcg_init,
+ ARRAY_SIZE(hawaii_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_golden_registers,
+ ARRAY_SIZE(hawaii_golden_registers));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_golden_common_registers,
+ ARRAY_SIZE(hawaii_golden_common_registers));
+ amdgpu_device_program_register_sequence(adev,
+ hawaii_golden_spm_registers,
+ ARRAY_SIZE(hawaii_golden_spm_registers));
break;
default:
break;
@@ -964,80 +1041,178 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
}
static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
- {mmGRBM_STATUS, false},
- {mmGB_ADDR_CONFIG, false},
- {mmMC_ARB_RAMCFG, false},
- {mmGB_TILE_MODE0, false},
- {mmGB_TILE_MODE1, false},
- {mmGB_TILE_MODE2, false},
- {mmGB_TILE_MODE3, false},
- {mmGB_TILE_MODE4, false},
- {mmGB_TILE_MODE5, false},
- {mmGB_TILE_MODE6, false},
- {mmGB_TILE_MODE7, false},
- {mmGB_TILE_MODE8, false},
- {mmGB_TILE_MODE9, false},
- {mmGB_TILE_MODE10, false},
- {mmGB_TILE_MODE11, false},
- {mmGB_TILE_MODE12, false},
- {mmGB_TILE_MODE13, false},
- {mmGB_TILE_MODE14, false},
- {mmGB_TILE_MODE15, false},
- {mmGB_TILE_MODE16, false},
- {mmGB_TILE_MODE17, false},
- {mmGB_TILE_MODE18, false},
- {mmGB_TILE_MODE19, false},
- {mmGB_TILE_MODE20, false},
- {mmGB_TILE_MODE21, false},
- {mmGB_TILE_MODE22, false},
- {mmGB_TILE_MODE23, false},
- {mmGB_TILE_MODE24, false},
- {mmGB_TILE_MODE25, false},
- {mmGB_TILE_MODE26, false},
- {mmGB_TILE_MODE27, false},
- {mmGB_TILE_MODE28, false},
- {mmGB_TILE_MODE29, false},
- {mmGB_TILE_MODE30, false},
- {mmGB_TILE_MODE31, false},
- {mmGB_MACROTILE_MODE0, false},
- {mmGB_MACROTILE_MODE1, false},
- {mmGB_MACROTILE_MODE2, false},
- {mmGB_MACROTILE_MODE3, false},
- {mmGB_MACROTILE_MODE4, false},
- {mmGB_MACROTILE_MODE5, false},
- {mmGB_MACROTILE_MODE6, false},
- {mmGB_MACROTILE_MODE7, false},
- {mmGB_MACROTILE_MODE8, false},
- {mmGB_MACROTILE_MODE9, false},
- {mmGB_MACROTILE_MODE10, false},
- {mmGB_MACROTILE_MODE11, false},
- {mmGB_MACROTILE_MODE12, false},
- {mmGB_MACROTILE_MODE13, false},
- {mmGB_MACROTILE_MODE14, false},
- {mmGB_MACROTILE_MODE15, false},
- {mmCC_RB_BACKEND_DISABLE, false, true},
- {mmGC_USER_RB_BACKEND_DISABLE, false, true},
- {mmGB_BACKEND_MAP, false, false},
- {mmPA_SC_RASTER_CONFIG, false, true},
- {mmPA_SC_RASTER_CONFIG_1, false, true},
+ {mmGRBM_STATUS},
+ {mmGRBM_STATUS2},
+ {mmGRBM_STATUS_SE0},
+ {mmGRBM_STATUS_SE1},
+ {mmGRBM_STATUS_SE2},
+ {mmGRBM_STATUS_SE3},
+ {mmSRBM_STATUS},
+ {mmSRBM_STATUS2},
+ {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET},
+ {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET},
+ {mmCP_STAT},
+ {mmCP_STALLED_STAT1},
+ {mmCP_STALLED_STAT2},
+ {mmCP_STALLED_STAT3},
+ {mmCP_CPF_BUSY_STAT},
+ {mmCP_CPF_STALLED_STAT1},
+ {mmCP_CPF_STATUS},
+ {mmCP_CPC_BUSY_STAT},
+ {mmCP_CPC_STALLED_STAT1},
+ {mmCP_CPC_STATUS},
+ {mmGB_ADDR_CONFIG},
+ {mmMC_ARB_RAMCFG},
+ {mmGB_TILE_MODE0},
+ {mmGB_TILE_MODE1},
+ {mmGB_TILE_MODE2},
+ {mmGB_TILE_MODE3},
+ {mmGB_TILE_MODE4},
+ {mmGB_TILE_MODE5},
+ {mmGB_TILE_MODE6},
+ {mmGB_TILE_MODE7},
+ {mmGB_TILE_MODE8},
+ {mmGB_TILE_MODE9},
+ {mmGB_TILE_MODE10},
+ {mmGB_TILE_MODE11},
+ {mmGB_TILE_MODE12},
+ {mmGB_TILE_MODE13},
+ {mmGB_TILE_MODE14},
+ {mmGB_TILE_MODE15},
+ {mmGB_TILE_MODE16},
+ {mmGB_TILE_MODE17},
+ {mmGB_TILE_MODE18},
+ {mmGB_TILE_MODE19},
+ {mmGB_TILE_MODE20},
+ {mmGB_TILE_MODE21},
+ {mmGB_TILE_MODE22},
+ {mmGB_TILE_MODE23},
+ {mmGB_TILE_MODE24},
+ {mmGB_TILE_MODE25},
+ {mmGB_TILE_MODE26},
+ {mmGB_TILE_MODE27},
+ {mmGB_TILE_MODE28},
+ {mmGB_TILE_MODE29},
+ {mmGB_TILE_MODE30},
+ {mmGB_TILE_MODE31},
+ {mmGB_MACROTILE_MODE0},
+ {mmGB_MACROTILE_MODE1},
+ {mmGB_MACROTILE_MODE2},
+ {mmGB_MACROTILE_MODE3},
+ {mmGB_MACROTILE_MODE4},
+ {mmGB_MACROTILE_MODE5},
+ {mmGB_MACROTILE_MODE6},
+ {mmGB_MACROTILE_MODE7},
+ {mmGB_MACROTILE_MODE8},
+ {mmGB_MACROTILE_MODE9},
+ {mmGB_MACROTILE_MODE10},
+ {mmGB_MACROTILE_MODE11},
+ {mmGB_MACROTILE_MODE12},
+ {mmGB_MACROTILE_MODE13},
+ {mmGB_MACROTILE_MODE14},
+ {mmGB_MACROTILE_MODE15},
+ {mmCC_RB_BACKEND_DISABLE, true},
+ {mmGC_USER_RB_BACKEND_DISABLE, true},
+ {mmGB_BACKEND_MAP, false},
+ {mmPA_SC_RASTER_CONFIG, true},
+ {mmPA_SC_RASTER_CONFIG_1, true},
};
-static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
- u32 se_num, u32 sh_num,
- u32 reg_offset)
+
+static uint32_t cik_get_register_value(struct amdgpu_device *adev,
+ bool indexed, u32 se_num,
+ u32 sh_num, u32 reg_offset)
{
- uint32_t val;
+ if (indexed) {
+ uint32_t val;
+ unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+ unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+ switch (reg_offset) {
+ case mmCC_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+ case mmGC_USER_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+ case mmPA_SC_RASTER_CONFIG:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+ case mmPA_SC_RASTER_CONFIG_1:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+ }
- mutex_lock(&adev->grbm_idx_mutex);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0);
- val = RREG32(reg_offset);
+ val = RREG32(reg_offset);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- return val;
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return val;
+ } else {
+ unsigned idx;
+
+ switch (reg_offset) {
+ case mmGB_ADDR_CONFIG:
+ return adev->gfx.config.gb_addr_config;
+ case mmMC_ARB_RAMCFG:
+ return adev->gfx.config.mc_arb_ramcfg;
+ case mmGB_TILE_MODE0:
+ case mmGB_TILE_MODE1:
+ case mmGB_TILE_MODE2:
+ case mmGB_TILE_MODE3:
+ case mmGB_TILE_MODE4:
+ case mmGB_TILE_MODE5:
+ case mmGB_TILE_MODE6:
+ case mmGB_TILE_MODE7:
+ case mmGB_TILE_MODE8:
+ case mmGB_TILE_MODE9:
+ case mmGB_TILE_MODE10:
+ case mmGB_TILE_MODE11:
+ case mmGB_TILE_MODE12:
+ case mmGB_TILE_MODE13:
+ case mmGB_TILE_MODE14:
+ case mmGB_TILE_MODE15:
+ case mmGB_TILE_MODE16:
+ case mmGB_TILE_MODE17:
+ case mmGB_TILE_MODE18:
+ case mmGB_TILE_MODE19:
+ case mmGB_TILE_MODE20:
+ case mmGB_TILE_MODE21:
+ case mmGB_TILE_MODE22:
+ case mmGB_TILE_MODE23:
+ case mmGB_TILE_MODE24:
+ case mmGB_TILE_MODE25:
+ case mmGB_TILE_MODE26:
+ case mmGB_TILE_MODE27:
+ case mmGB_TILE_MODE28:
+ case mmGB_TILE_MODE29:
+ case mmGB_TILE_MODE30:
+ case mmGB_TILE_MODE31:
+ idx = (reg_offset - mmGB_TILE_MODE0);
+ return adev->gfx.config.tile_mode_array[idx];
+ case mmGB_MACROTILE_MODE0:
+ case mmGB_MACROTILE_MODE1:
+ case mmGB_MACROTILE_MODE2:
+ case mmGB_MACROTILE_MODE3:
+ case mmGB_MACROTILE_MODE4:
+ case mmGB_MACROTILE_MODE5:
+ case mmGB_MACROTILE_MODE6:
+ case mmGB_MACROTILE_MODE7:
+ case mmGB_MACROTILE_MODE8:
+ case mmGB_MACROTILE_MODE9:
+ case mmGB_MACROTILE_MODE10:
+ case mmGB_MACROTILE_MODE11:
+ case mmGB_MACROTILE_MODE12:
+ case mmGB_MACROTILE_MODE13:
+ case mmGB_MACROTILE_MODE14:
+ case mmGB_MACROTILE_MODE15:
+ idx = (reg_offset - mmGB_MACROTILE_MODE0);
+ return adev->gfx.config.macrotile_mode_array[idx];
+ default:
+ return RREG32(reg_offset);
+ }
+ }
}
static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -1047,14 +1222,13 @@ static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
+ bool indexed = cik_allowed_read_registers[i].grbm_indexed;
+
if (reg_offset != cik_allowed_read_registers[i].reg_offset)
continue;
- if (!cik_allowed_read_registers[i].untouched)
- *value = cik_allowed_read_registers[i].grbm_indexed ?
- cik_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = cik_get_register_value(adev, indexed, se_num, sh_num,
+ reg_offset);
return 0;
}
return -EINVAL;
@@ -1153,13 +1327,22 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
}
-static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
+/**
+ * cik_asic_pci_config_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use PCI Config method to reset the GPU.
+ *
+ * Returns 0 for success.
+ */
+static int cik_asic_pci_config_reset(struct amdgpu_device *adev)
{
struct kv_reset_save_regs kv_save = { 0 };
u32 i;
int r = -EINVAL;
- dev_info(adev->dev, "GPU pci config reset\n");
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
if (adev->flags & AMD_IS_APU)
kv_save_regs_for_reset(adev, &kv_save);
@@ -1167,7 +1350,7 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
/* disable BM */
pci_clear_master(adev->pdev);
/* reset */
- amdgpu_pci_config_reset(adev);
+ amdgpu_device_pci_config_reset(adev);
udelay(100);
@@ -1187,9 +1370,51 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
kv_restore_regs_for_reset(adev, &kv_save);
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
return r;
}
+static int cik_asic_supports_baco(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ return amdgpu_dpm_is_baco_supported(adev);
+ default:
+ return 0;
+ }
+}
+
+static enum amd_reset_method
+cik_asic_reset_method(struct amdgpu_device *adev)
+{
+ bool baco_reset;
+
+ if (amdgpu_reset_method == AMD_RESET_METHOD_LEGACY ||
+ amdgpu_reset_method == AMD_RESET_METHOD_BACO)
+ return amdgpu_reset_method;
+
+ if (amdgpu_reset_method != -1)
+ dev_warn(adev->dev, "Specified reset:%d isn't supported, using AUTO instead.\n",
+ amdgpu_reset_method);
+
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ baco_reset = cik_asic_supports_baco(adev);
+ break;
+ default:
+ baco_reset = false;
+ break;
+ }
+
+ if (baco_reset)
+ return AMD_RESET_METHOD_BACO;
+ else
+ return AMD_RESET_METHOD_LEGACY;
+}
+
/**
* cik_asic_reset - soft reset GPU
*
@@ -1203,11 +1428,17 @@ static int cik_asic_reset(struct amdgpu_device *adev)
{
int r;
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
- r = cik_gpu_pci_config_reset(adev);
+ /* APUs don't have full asic reset */
+ if (adev->flags & AMD_IS_APU)
+ return 0;
- amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+ if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+ dev_info(adev->dev, "BACO reset\n");
+ r = amdgpu_dpm_baco_reset(adev);
+ } else {
+ dev_info(adev->dev, "PCI CONFIG reset\n");
+ r = cik_asic_pci_config_reset(adev);
+ }
return r;
}
@@ -1299,7 +1530,6 @@ static int cik_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
{
struct pci_dev *root = adev->pdev->bus->self;
- int bridge_pos, gpu_pos;
u32 speed_cntl, current_data_rate;
int i;
u16 tmp16;
@@ -1334,12 +1564,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
DRM_INFO("enabling PCIE gen 2 link speeds, disable with amdgpu.pcie_gen2=0\n");
}
- bridge_pos = pci_pcie_cap(root);
- if (!bridge_pos)
- return;
-
- gpu_pos = pci_pcie_cap(adev->pdev);
- if (!gpu_pos)
+ if (!pci_is_pcie(root) || !pci_is_pcie(adev->pdev))
return;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
@@ -1349,14 +1574,8 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
u16 bridge_cfg2, gpu_cfg2;
u32 max_lw, current_lw, tmp;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
-
- tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
-
- tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_set_word(root, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_set_word(adev->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
tmp = RREG32_PCIE(ixPCIE_LC_STATUS1);
max_lw = (tmp & PCIE_LC_STATUS1__LC_DETECTED_LINK_WIDTH_MASK) >>
@@ -1380,15 +1599,23 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
for (i = 0; i < 10; i++) {
/* check status */
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_DEVSTA,
+ &tmp16);
if (tmp16 & PCI_EXP_DEVSTA_TRPND)
break;
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL,
+ &bridge_cfg);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL,
+ &gpu_cfg);
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+ pcie_capability_read_word(root, PCI_EXP_LNKCTL2,
+ &bridge_cfg2);
+ pcie_capability_read_word(adev->pdev,
+ PCI_EXP_LNKCTL2,
+ &gpu_cfg2);
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp |= PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1398,29 +1625,31 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
- mdelay(100);
+ msleep(100);
/* linkctl */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
-
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
- tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
- tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ bridge_cfg &
+ PCI_EXP_LNKCTL_HAWD);
+ pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_HAWD,
+ gpu_cfg &
+ PCI_EXP_LNKCTL_HAWD);
/* linkctl2 */
- pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
-
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~((1 << 4) | (7 << 9));
- tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ pcie_capability_clear_and_set_word(root, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN,
+ bridge_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
+ pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN,
+ gpu_cfg2 &
+ (PCI_EXP_LNKCTL2_ENTER_COMP |
+ PCI_EXP_LNKCTL2_TX_MARGIN));
tmp = RREG32_PCIE(ixPCIE_LC_CNTL4);
tmp &= ~PCIE_LC_CNTL4__LC_SET_QUIESCE_MASK;
@@ -1435,15 +1664,15 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
speed_cntl &= ~PCIE_LC_SPEED_CNTL__LC_FORCE_DIS_SW_SPEED_CHANGE_MASK;
WREG32_PCIE(ixPCIE_LC_SPEED_CNTL, speed_cntl);
- pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
- tmp16 &= ~0xf;
+ tmp16 = 0;
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- tmp16 |= 3; /* gen3 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_8_0GT; /* gen3 */
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
- tmp16 |= 2; /* gen2 */
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_5_0GT; /* gen2 */
else
- tmp16 |= 1; /* gen1 */
- pci_write_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+ tmp16 |= PCI_EXP_LNKCTL2_TLS_2_5GT; /* gen1 */
+ pcie_capability_clear_and_set_word(adev->pdev, PCI_EXP_LNKCTL2,
+ PCI_EXP_LNKCTL2_TLS, tmp16);
speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
speed_cntl |= PCIE_LC_SPEED_CNTL__LC_INITIATE_LINK_SPEED_CHANGE_MASK;
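The hunks above replace open-coded PCIe capability lookups and read-modify-write sequences with the kernel's pcie_capability_*() helpers, which locate the capability offset themselves. A minimal sketch of the equivalence for the HAWD bit, assuming only the declarations in <linux/pci.h>; it is an illustration, not part of the patch:

#include <linux/pci.h>

/* Illustrative sketch only. Old style: find the capability offset by hand
 * and read-modify-write the Link Control register.
 */
static void set_hawd_open_coded(struct pci_dev *pdev)
{
	int pos = pci_pcie_cap(pdev);	/* 0 if the device is not PCIe */
	u16 lnkctl;

	if (!pos)
		return;
	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &lnkctl);
	lnkctl |= PCI_EXP_LNKCTL_HAWD;
	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, lnkctl);
}

/* New style: the helper resolves the capability offset internally. */
static void set_hawd_helper(struct pci_dev *pdev)
{
	pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_HAWD);
}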
@@ -1463,16 +1692,12 @@ static void cik_program_aspm(struct amdgpu_device *adev)
bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
bool disable_clkreq = false;
- if (amdgpu_aspm == 0)
+ if (!amdgpu_device_should_use_aspm(adev))
return;
if (pci_is_root_bus(adev->pdev->bus))
return;
- /* XXX double check APUs */
- if (adev->flags & AMD_IS_APU)
- return;
-
orig = data = RREG32_PCIE(ixPCIE_LC_N_FTS_CNTL);
data &= ~PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS_MASK;
data |= (0x24 << PCIE_LC_N_FTS_CNTL__LC_XMIT_N_FTS__SHIFT) |
@@ -1630,10 +1855,110 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
-static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
+static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+ RREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL);
+ } else {
+ amdgpu_ring_emit_wreg(ring, mmHDP_MEM_COHERENCY_FLUSH_CNTL, 1);
+ }
+}
+
+static void cik_invalidate_hdp(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
+{
+ if (!ring || !ring->funcs->emit_wreg) {
+ WREG32(mmHDP_DEBUG0, 1);
+ RREG32(mmHDP_DEBUG0);
+ } else {
+ amdgpu_ring_emit_wreg(ring, mmHDP_DEBUG0, 1);
+ }
+}
+
+static bool cik_need_full_reset(struct amdgpu_device *adev)
+{
+ /* change this when we support soft reset */
+ return true;
+}
+
+static void cik_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
+ uint64_t *count1)
+{
+ uint32_t perfctr = 0;
+ uint64_t cnt0_of, cnt1_of;
+ int tmp;
+
+ /* This reports 0 on APUs, so return to avoid writing/reading registers
+ * that may or may not be different from their GPU counterparts
+ */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ /* Set the 2 events that we wish to watch, defined above */
+ /* Reg 40 is # received msgs, Reg 104 is # of posted requests sent */
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
+ perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);
+
+ /* Write to enable desired perf counters */
+ WREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK, perfctr);
+ /* Zero out and enable the perf counters
+ * Write 0x5:
+ * Bit 0 = Start all counters(1)
+ * Bit 2 = Global counter reset enable(1)
+ */
+ WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000005);
+
+ msleep(1000);
+
+ /* Load the shadow and disable the perf counters
+ * Write 0x2:
+ * Bit 0 = Stop counters(0)
+ * Bit 1 = Load the shadow counters(1)
+ */
+ WREG32_PCIE(ixPCIE_PERF_COUNT_CNTL, 0x00000002);
+
+ /* Read register values to get any >32bit overflow */
+ tmp = RREG32_PCIE(ixPCIE_PERF_CNTL_TXCLK);
+ cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
+ cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);
+
+ /* Get the values and add the overflow */
+ *count0 = RREG32_PCIE(ixPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
+ *count1 = RREG32_PCIE(ixPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
+}
+
+static bool cik_need_reset_on_init(struct amdgpu_device *adev)
+{
+ u32 clock_cntl, pc;
+
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ /* check if the SMC is already running */
+ clock_cntl = RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0);
+ pc = RREG32_SMC(ixSMC_PC_C);
+ if ((REG_GET_FIELD(clock_cntl, SMC_SYSCON_CLOCK_CNTL_0, ck_disable) == 0) &&
+ (pc >= 0x20100))
+ return true;
+
+ return false;
+}
+
+static uint64_t cik_get_pcie_replay_count(struct amdgpu_device *adev)
+{
+ uint64_t nak_r, nak_g;
+
+ /* Get the number of NAKs received and generated */
+ nak_r = RREG32_PCIE(ixPCIE_RX_NUM_NAK);
+ nak_g = RREG32_PCIE(ixPCIE_RX_NUM_NAK_GENERATED);
+
+ /* Add the total number of NAKs, i.e. the number of replays */
+ return (nak_r + nak_g);
+}
+
+static void cik_pre_asic_init(struct amdgpu_device *adev)
{
- if (is_virtual_machine()) /* passthrough mode */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
}
static const struct amdgpu_asic_funcs cik_asic_funcs =
@@ -1642,16 +1967,27 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.read_bios_from_rom = &cik_read_bios_from_rom,
.read_register = &cik_read_register,
.reset = &cik_asic_reset,
+ .reset_method = &cik_asic_reset_method,
.set_vga_state = &cik_vga_set_state,
.get_xclk = &cik_get_xclk,
.set_uvd_clocks = &cik_set_uvd_clocks,
.set_vce_clocks = &cik_set_vce_clocks,
.get_config_memsize = &cik_get_config_memsize,
+ .flush_hdp = &cik_flush_hdp,
+ .invalidate_hdp = &cik_invalidate_hdp,
+ .need_full_reset = &cik_need_full_reset,
+ .init_doorbell_index = &legacy_doorbell_index_init,
+ .get_pcie_usage = &cik_get_pcie_usage,
+ .need_reset_on_init = &cik_need_reset_on_init,
+ .get_pcie_replay_count = &cik_get_pcie_replay_count,
+ .supports_baco = &cik_asic_supports_baco,
+ .pre_asic_init = &cik_pre_asic_init,
+ .query_video_codecs = &cik_query_video_codecs,
};
-static int cik_common_early_init(void *handle)
+static int cik_common_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->smc_rreg = &cik_smc_rreg;
adev->smc_wreg = &cik_smc_wreg;
@@ -1785,26 +2121,12 @@ static int cik_common_early_init(void *handle)
return -EINVAL;
}
- adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
-
- amdgpu_get_pcie_info(adev);
-
- return 0;
-}
-
-static int cik_common_sw_init(void *handle)
-{
- return 0;
-}
-
-static int cik_common_sw_fini(void *handle)
-{
return 0;
}
-static int cik_common_hw_init(void *handle)
+static int cik_common_hw_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
/* move the golden regs per IP block */
cik_init_golden_registers(adev);
@@ -1816,55 +2138,36 @@ static int cik_common_hw_init(void *handle)
return 0;
}
-static int cik_common_hw_fini(void *handle)
+static int cik_common_hw_fini(struct amdgpu_ip_block *ip_block)
{
return 0;
}
-static int cik_common_suspend(void *handle)
+static int cik_common_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- amdgpu_amdkfd_suspend(adev);
-
- return cik_common_hw_fini(adev);
-}
-
-static int cik_common_resume(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- r = cik_common_hw_init(adev);
- if (r)
- return r;
-
- return amdgpu_amdkfd_resume(adev);
+ return cik_common_hw_init(ip_block);
}
-static bool cik_common_is_idle(void *handle)
+static bool cik_common_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
-static int cik_common_wait_for_idle(void *handle)
-{
- return 0;
-}
-static int cik_common_soft_reset(void *handle)
+
+static int cik_common_soft_reset(struct amdgpu_ip_block *ip_block)
{
/* XXX hard reset?? */
return 0;
}
-static int cik_common_set_clockgating_state(void *handle,
+static int cik_common_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int cik_common_set_powergating_state(void *handle,
+static int cik_common_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1873,15 +2176,10 @@ static int cik_common_set_powergating_state(void *handle,
static const struct amd_ip_funcs cik_common_ip_funcs = {
.name = "cik_common",
.early_init = cik_common_early_init,
- .late_init = NULL,
- .sw_init = cik_common_sw_init,
- .sw_fini = cik_common_sw_fini,
.hw_init = cik_common_hw_init,
.hw_fini = cik_common_hw_fini,
- .suspend = cik_common_suspend,
.resume = cik_common_resume,
.is_idle = cik_common_is_idle,
- .wait_for_idle = cik_common_wait_for_idle,
.soft_reset = cik_common_soft_reset,
.set_clockgating_state = cik_common_set_clockgating_state,
.set_powergating_state = cik_common_set_powergating_state,
@@ -1898,65 +2196,80 @@ static const struct amdgpu_ip_block_version cik_common_ip_block =
int cik_set_ip_blocks(struct amdgpu_device *adev)
{
- cik_detect_hw_virtualization(adev);
-
switch (adev->asic_type) {
case CHIP_BONAIRE:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
case CHIP_HAWAII:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
case CHIP_KAVERI:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_1_ip_block);
+
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
case CHIP_KABINI:
case CHIP_MULLINS:
- amdgpu_ip_block_add(adev, &cik_common_ip_block);
- amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
- amdgpu_ip_block_add(adev, &cik_ih_ip_block);
- amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+ amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+ amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
if (adev->enable_virtual_display)
- amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+ amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ else if (amdgpu_device_has_dc_support(adev))
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#endif
else
- amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
- amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
- amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
- amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
- amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &dce_v8_3_ip_block);
+ amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+ amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h
index c4989f51ecef..f91ab4c246b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik.h
@@ -24,8 +24,12 @@
#ifndef __CIK_H__
#define __CIK_H__
+#define CIK_FLUSH_GPU_TLB_NUM_WREG 3
+
void cik_srbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
int cik_set_ip_blocks(struct amdgpu_device *adev);
+void legacy_doorbell_index_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index c57c3f18af01..41f4705bdbbd 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -20,7 +20,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "cikd.h"
@@ -103,15 +105,15 @@ static void cik_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int cik_ih_irq_init(struct amdgpu_device *adev)
{
+ struct amdgpu_ih_ring *ih = &adev->irq.ih;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
- u64 wptr_off;
/* disable irqs */
cik_ih_disable_interrupts(adev);
/* setup interrupt control */
- WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+ WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -131,9 +133,8 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl |= IH_RB_CNTL__WPTR_WRITEBACK_ENABLE_MASK;
/* set the writeback address whether it's enabled or not */
- wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
- WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
- WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+ WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
+ WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
WREG32(mmIH_RB_CNTL, ih_rb_cntl);
@@ -176,6 +177,7 @@ static void cik_ih_irq_disable(struct amdgpu_device *adev)
* cik_ih_get_wptr - get the IH ring buffer wptr
*
* @adev: amdgpu_device pointer
+ * @ih: IH ring buffer to fetch wptr
*
* Get the IH ring buffer wptr from either the register
* or the writeback memory buffer (CIK). Also check for
@@ -183,11 +185,12 @@ static void cik_ih_irq_disable(struct amdgpu_device *adev)
* Used by cik_irq_process().
* Returns the value of the wptr.
*/
-static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
+static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
u32 wptr, tmp;
- wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
+ wptr = le32_to_cpu(*ih->wptr_cpu);
if (wptr & IH_RB_WPTR__RB_OVERFLOW_MASK) {
wptr &= ~IH_RB_WPTR__RB_OVERFLOW_MASK;
@@ -196,13 +199,19 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
* this should allow us to catchup.
*/
dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
- adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
tmp = RREG32(mmIH_RB_CNTL);
tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(mmIH_RB_CNTL, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+ WREG32(mmIH_RB_CNTL, tmp);
}
- return (wptr & adev->irq.ih.ptr_mask);
+ return (wptr & ih->ptr_mask);
}
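cik_ih_get_wptr() above keeps ring offsets in range with a power-of-two byte mask and, on overflow, parks the read pointer one 16-byte IV entry past the write pointer so processing can catch up. A standalone sketch of that arithmetic; the ring size here is a hypothetical value, not the driver's:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t ring_bytes = 4096;	/* hypothetical power-of-two size */
	const uint32_t ptr_mask = ring_bytes - 1;
	uint32_t wptr = ring_bytes + 32;	/* offset reported past one wrap */
	uint32_t rptr;

	/* Masking wraps the byte offset back into the ring. */
	assert((wptr & ptr_mask) == 32);

	/* Overflow catch-up: resume one 16-byte IV entry past the write pointer. */
	rptr = (wptr + 16) & ptr_mask;
	assert(rptr == 48);
	return 0;
}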
/* CIK IV Ring
@@ -237,43 +246,46 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
* position and also advance the position.
*/
static void cik_ih_decode_iv(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
struct amdgpu_iv_entry *entry)
{
/* wptr/rptr are in bytes! */
- u32 ring_index = adev->irq.ih.rptr >> 2;
+ u32 ring_index = ih->rptr >> 2;
uint32_t dw[4];
- dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
- dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
- dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
- dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+ dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+ dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+ dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
- entry->vm_id = (dw[2] >> 8) & 0xff;
- entry->pas_id = (dw[2] >> 16) & 0xffff;
+ entry->vmid = (dw[2] >> 8) & 0xff;
+ entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
- adev->irq.ih.rptr += 16;
+ ih->rptr += 16;
}
/**
* cik_ih_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
+ * @ih: IH ring buffer to set rptr
*
* Set the IH ring buffer rptr.
*/
-static void cik_ih_set_rptr(struct amdgpu_device *adev)
+static void cik_ih_set_rptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
- WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+ WREG32(mmIH_RB_RPTR, ih->rptr);
}
-static int cik_ih_early_init(void *handle)
+static int cik_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = amdgpu_irq_add_domain(adev);
@@ -285,12 +297,12 @@ static int cik_ih_early_init(void *handle)
return 0;
}
-static int cik_ih_sw_init(void *handle)
+static int cik_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
return r;
@@ -299,55 +311,43 @@ static int cik_ih_sw_init(void *handle)
return r;
}
-static int cik_ih_sw_fini(void *handle)
+static int cik_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev);
+ amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
return 0;
}
-static int cik_ih_hw_init(void *handle)
+static int cik_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- r = cik_ih_irq_init(adev);
- if (r)
- return r;
+ struct amdgpu_device *adev = ip_block->adev;
- return 0;
+ return cik_ih_irq_init(adev);
}
-static int cik_ih_hw_fini(void *handle)
+static int cik_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- cik_ih_irq_disable(adev);
+ cik_ih_irq_disable(ip_block->adev);
return 0;
}
-static int cik_ih_suspend(void *handle)
+static int cik_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cik_ih_hw_fini(adev);
+ return cik_ih_hw_fini(ip_block);
}
-static int cik_ih_resume(void *handle)
+static int cik_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cik_ih_hw_init(adev);
+ return cik_ih_hw_init(ip_block);
}
-static bool cik_ih_is_idle(void *handle)
+static bool cik_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
@@ -356,11 +356,11 @@ static bool cik_ih_is_idle(void *handle)
return true;
}
-static int cik_ih_wait_for_idle(void *handle)
+static int cik_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -372,9 +372,9 @@ static int cik_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int cik_ih_soft_reset(void *handle)
+static int cik_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 srbm_soft_reset = 0;
u32 tmp = RREG32(mmSRBM_STATUS);
@@ -402,13 +402,13 @@ static int cik_ih_soft_reset(void *handle)
return 0;
}
-static int cik_ih_set_clockgating_state(void *handle,
+static int cik_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int cik_ih_set_powergating_state(void *handle,
+static int cik_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -417,7 +417,6 @@ static int cik_ih_set_powergating_state(void *handle,
static const struct amd_ip_funcs cik_ih_ip_funcs = {
.name = "cik_ih",
.early_init = cik_ih_early_init,
- .late_init = NULL,
.sw_init = cik_ih_sw_init,
.sw_fini = cik_ih_sw_fini,
.hw_init = cik_ih_hw_init,
@@ -439,12 +438,10 @@ static const struct amdgpu_ih_funcs cik_ih_funcs = {
static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
- if (adev->irq.ih_funcs == NULL)
- adev->irq.ih_funcs = &cik_ih_funcs;
+ adev->irq.ih_funcs = &cik_ih_funcs;
}
-const struct amdgpu_ip_block_version cik_ih_ip_block =
-{
+const struct amdgpu_ip_block_version cik_ih_ip_block = {
.type = AMD_IP_BLOCK_TYPE_IH,
.major = 2,
.minor = 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index c216e16826c9..9e8715b4739d 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -21,8 +21,10 @@
*
* Authors: Alex Deucher
*/
+
#include <linux/firmware.h>
-#include <drm/drmP.h>
+#include <linux/module.h>
+
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
@@ -52,29 +54,27 @@ static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev);
static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev);
-static int cik_sdma_soft_reset(void *handle);
-
-MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
-MODULE_FIRMWARE("radeon/bonaire_sdma1.bin");
-MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
-MODULE_FIRMWARE("radeon/hawaii_sdma1.bin");
-MODULE_FIRMWARE("radeon/kaveri_sdma.bin");
-MODULE_FIRMWARE("radeon/kaveri_sdma1.bin");
-MODULE_FIRMWARE("radeon/kabini_sdma.bin");
-MODULE_FIRMWARE("radeon/kabini_sdma1.bin");
-MODULE_FIRMWARE("radeon/mullins_sdma.bin");
-MODULE_FIRMWARE("radeon/mullins_sdma1.bin");
+static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block);
u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
+MODULE_FIRMWARE("amdgpu/bonaire_sdma.bin");
+MODULE_FIRMWARE("amdgpu/bonaire_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_sdma.bin");
+MODULE_FIRMWARE("amdgpu/hawaii_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_sdma.bin");
+MODULE_FIRMWARE("amdgpu/kaveri_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/kabini_sdma.bin");
+MODULE_FIRMWARE("amdgpu/kabini_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/mullins_sdma.bin");
+MODULE_FIRMWARE("amdgpu/mullins_sdma1.bin");
static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
/*
@@ -106,7 +106,6 @@ static void cik_sdma_free_microcode(struct amdgpu_device *adev)
static int cik_sdma_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
- char fw_name[30];
int err = 0, i;
DRM_DEBUG("\n");
@@ -132,21 +131,22 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
for (i = 0; i < adev->sdma.num_instances; i++) {
if (i == 0)
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sdma.bin", chip_name);
else
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_sdma1.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw,
+ AMDGPU_UCODE_REQUIRED,
+ "amdgpu/%s_sdma1.bin", chip_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
}
out:
if (err) {
- pr_err("cik_sdma: Failed to load firmware \"%s\"\n", fw_name);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+ pr_err("cik_sdma: Failed to load firmware \"%s_sdma%s.bin\"\n",
+ chip_name, i == 0 ? "" : "1");
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
return err;
}
@@ -162,7 +162,7 @@ static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
{
u32 rptr;
- rptr = ring->adev->wb.wb[ring->rptr_offs];
+ rptr = *ring->rptr_cpu_addr;
return (rptr & 0x3fffc) >> 2;
}
@@ -177,9 +177,8 @@ static uint64_t cik_sdma_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
+ return (RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) & 0x3fffc) >> 2;
}
/**
@@ -192,15 +191,14 @@ static uint64_t cik_sdma_ring_get_wptr(struct amdgpu_ring *ring)
static void cik_sdma_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me],
- (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me],
+ (ring->wptr << 2) & 0x3fffc);
}
static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
int i;
for (i = 0; i < count; i++)
@@ -215,18 +213,22 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
* cik_sdma_ring_emit_ib - Schedule an IB on the DMA engine
*
* @ring: amdgpu ring pointer
+ * @job: job to retrieve the vmid from
* @ib: IB object to schedule
+ * @flags: unused
*
* Schedule an IB in the DMA ring (CIK).
*/
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
+ struct amdgpu_job *job,
struct amdgpu_ib *ib,
- unsigned vm_id, bool ctx_switch)
+ uint32_t flags)
{
- u32 extra_bits = vm_id & 0xf;
+ unsigned vmid = AMDGPU_JOB_GET_VMID(job);
+ u32 extra_bits = vmid & 0xf;
/* IB packet must end on an 8 DW boundary */
- cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8);
+ cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7);
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
@@ -248,7 +250,7 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
u32 ref_and_mask;
- if (ring == &ring->adev->sdma.instance[0].ring)
+ if (ring->me == 0)
ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA0_MASK;
else
ref_and_mask = GPU_HDP_FLUSH_DONE__SDMA1_MASK;
@@ -261,18 +263,13 @@ static void cik_sdma_ring_emit_hdp_flush(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
-static void cik_sdma_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
-{
- amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- amdgpu_ring_write(ring, mmHDP_DEBUG0);
- amdgpu_ring_write(ring, 1);
-}
-
/**
* cik_sdma_ring_emit_fence - emit a fence on the DMA ring
*
* @ring: amdgpu ring pointer
- * @fence: amdgpu fence object
+ * @addr: address the fence value is written to
+ * @seq: sequence number
+ * @flags: fence related flags
*
* Add a DMA fence packet to the ring to write
* the fence seq number and DMA trap packet to generate
@@ -310,23 +307,15 @@ static void cik_sdma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
*/
static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
{
- struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
- struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
u32 rb_cntl;
int i;
- if ((adev->mman.buffer_funcs_ring == sdma0) ||
- (adev->mman.buffer_funcs_ring == sdma1))
- amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);
-
for (i = 0; i < adev->sdma.num_instances; i++) {
rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
rb_cntl &= ~SDMA0_GFX_RB_CNTL__RB_ENABLE_MASK;
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
}
- sdma0->ready = false;
- sdma1->ready = false;
}
/**
@@ -342,6 +331,63 @@ static void cik_sdma_rlc_stop(struct amdgpu_device *adev)
}
/**
+ * cik_ctx_switch_enable - enable/disable the async dma engines context switch
+ *
+ * @adev: amdgpu_device pointer
+ * @enable: enable/disable the DMA MEs context switch.
+ *
+ * Halt or unhalt the async dma engines context switch (CIK).
+ */
+static void cik_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
+{
+ u32 f32_cntl, phase_quantum = 0;
+ int i;
+
+ if (amdgpu_sdma_phase_quantum) {
+ unsigned value = amdgpu_sdma_phase_quantum;
+ unsigned unit = 0;
+
+ while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
+ value = (value + 1) >> 1;
+ unit++;
+ }
+ if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
+ value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
+ SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
+ unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
+ SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
+ WARN_ONCE(1,
+ "clamping sdma_phase_quantum to %uK clock cycles\n",
+ value << unit);
+ }
+ phase_quantum =
+ value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
+ unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
+ }
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
+ if (enable) {
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ AUTO_CTXSW_ENABLE, 1);
+ if (amdgpu_sdma_phase_quantum) {
+ WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
+ phase_quantum);
+ WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
+ phase_quantum);
+ }
+ } else {
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ AUTO_CTXSW_ENABLE, 0);
+ }
+
+ WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
+ }
+}
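cik_ctx_switch_enable() above packs the requested quantum into a VALUE/UNIT pair: the value is halved (rounding up) while the unit counts the scale, and both are clamped to their register fields. A standalone sketch of that encoding, using hypothetical 8-bit VALUE and 4-bit UNIT widths in place of the real SDMA0_PHASE0_QUANTUM layout:

#include <stdint.h>
#include <stdio.h>

#define VALUE_BITS	8U	/* hypothetical width of the VALUE field */
#define VALUE_MAX	((1U << VALUE_BITS) - 1)
#define UNIT_BITS	4U	/* hypothetical width of the UNIT field */
#define UNIT_MAX	((1U << UNIT_BITS) - 1)

/* Illustrative sketch only: encode a cycle count as value * 2^unit. */
static uint32_t encode_quantum(uint32_t cycles)
{
	uint32_t value = cycles, unit = 0;

	while (value > VALUE_MAX) {	/* halve, rounding up, until it fits */
		value = (value + 1) >> 1;
		unit++;
	}
	if (unit > UNIT_MAX) {		/* clamp, as the driver warns and does */
		value = VALUE_MAX;
		unit = UNIT_MAX;
	}
	return (value << UNIT_BITS) | unit;	/* VALUE packed above UNIT here */
}

int main(void)
{
	/* 1000 cycles encodes as value=250, unit=2 (250 << 2 == 1000). */
	printf("0x%03x\n", encode_quantum(1000));
	return 0;
}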
+
+/**
* cik_sdma_enable - stop the async dma engines
*
* @adev: amdgpu_device pointer
@@ -382,12 +428,10 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
struct amdgpu_ring *ring;
u32 rb_cntl, ib_cntl;
u32 rb_bufsz;
- u32 wb_offset;
int i, j, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- wb_offset = (ring->rptr_offs * 4);
mutex_lock(&adev->srbm_mutex);
for (j = 0; j < 16; j++) {
@@ -423,9 +467,9 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
/* set the wb address whether it's enabled or not */
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
- upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
- ((adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+ ((ring->rptr_gpu_addr) & 0xFFFFFFFC));
rb_cntl |= SDMA0_GFX_RB_CNTL__RPTR_WRITEBACK_ENABLE_MASK;
@@ -433,7 +477,7 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);
ring->wptr = 0;
- WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
+ WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);
/* enable DMA RB */
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i],
@@ -445,22 +489,15 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
#endif
/* enable DMA IBs */
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
-
- ring->ready = true;
}
cik_sdma_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
-
- if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
}
return 0;
@@ -537,6 +574,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
/* halt the engine before programming */
cik_sdma_enable(adev, false);
+ /* enable sdma ring preemption */
+ cik_ctx_switch_enable(adev, true);
/* start the gfx rings and rlc compute queues */
r = cik_sdma_gfx_resume(adev);
@@ -567,22 +606,18 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
u32 tmp;
u64 gpu_addr;
- r = amdgpu_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
r = amdgpu_ring_alloc(ring, 5);
- if (r) {
- DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
- amdgpu_wb_free(adev, index);
- return r;
- }
+ if (r)
+ goto error_free_wb;
+
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
@@ -594,18 +629,14 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
tmp = le32_to_cpu(adev->wb.wb[index]);
if (tmp == 0xDEADBEEF)
break;
- DRM_UDELAY(1);
+ udelay(1);
}
- if (i < adev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- amdgpu_wb_free(adev, index);
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+error_free_wb:
+ amdgpu_device_wb_free(adev, index);
return r;
}
@@ -613,6 +644,7 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
* cik_sdma_ring_test_ib - test an IB on the DMA engine
*
* @ring: amdgpu_ring structure holding ring information
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
*
* Test a simple IB in the DMA ring (CIK).
* Returns 0 on success, error on failure.
@@ -627,21 +659,18 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
u64 gpu_addr;
long r;
- r = amdgpu_wb_get(adev, &index);
- if (r) {
- dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
return r;
- }
gpu_addr = adev->wb.gpu_addr + (index * 4);
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
+ if (r)
goto err0;
- }
ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
@@ -656,32 +685,27 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out\n");
r = -ETIMEDOUT;
goto err1;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err1;
}
tmp = le32_to_cpu(adev->wb.wb[index]);
- if (tmp == 0xDEADBEEF) {
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+ if (tmp == 0xDEADBEEF)
r = 0;
- } else {
- DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
+ else
r = -EINVAL;
- }
err1:
- amdgpu_ib_free(adev, &ib, NULL);
+ amdgpu_ib_free(&ib, NULL);
dma_fence_put(f);
err0:
- amdgpu_wb_free(adev, index);
+ amdgpu_device_wb_free(adev, index);
return r;
}
/**
- * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
+ * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
@@ -707,7 +731,7 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
}
/**
- * cik_sdma_vm_write_pages - update PTEs by writing them manually
+ * cik_sdma_vm_write_pte - update PTEs by writing them manually
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
@@ -736,7 +760,7 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
}
/**
- * cik_sdma_vm_set_pages - update the page tables using sDMA
+ * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
@@ -765,18 +789,19 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
}
/**
- * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ * cik_sdma_ring_pad_ib - pad the IB to the required number of dw
*
+ * @ring: amdgpu_ring structure holding ring information
* @ib: indirect buffer to fill with padding
*
*/
static void cik_sdma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
- struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
+ struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
u32 pad_count;
int i;
- pad_count = (8 - (ib->length_dw & 0x7)) % 8;
+ pad_count = (-ib->length_dw) & 7;
for (i = 0; i < pad_count; i++)
if (sdma && sdma->burst_nop && (i == 0))
ib->ptr[ib->length_dw++] =
@@ -807,7 +832,7 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, addr & 0xfffffffc);
amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
amdgpu_ring_write(ring, seq); /* reference */
- amdgpu_ring_write(ring, 0xfffffff); /* mask */
+ amdgpu_ring_write(ring, 0xffffffff); /* mask */
amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
}
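The padding rewrites above (cik_sdma_ring_insert_nop and cik_sdma_ring_pad_ib) use the identity that, for a power-of-two alignment A, the pad needed to round n up to a multiple of A is (-n) & (A - 1), equal to the older (A - (n & (A - 1))) % A form; insert_nop applies the same identity with a 4-dword bias. A small standalone check of the identity, independent of any driver state:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t A = 8;	/* SDMA IBs are padded to 8-dword multiples */
	uint32_t n;

	for (n = 0; n < 64; n++) {
		uint32_t old_form = (A - (n & (A - 1))) % A;
		uint32_t new_form = (0U - n) & (A - 1);

		assert(old_form == new_form);
		assert(((n + new_form) & (A - 1)) == 0);	/* padded size is aligned */
	}
	return 0;
}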
@@ -815,29 +840,19 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* cik_sdma_ring_emit_vm_flush - cik vm flush using sDMA
*
* @ring: amdgpu_ring pointer
- * @vm: amdgpu_vm pointer
+ * @vmid: vmid number to use
+ * @pd_addr: page directory address
*
* Update the page table base and flush the VM TLB
* using sDMA (CIK).
*/
static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
+ unsigned vmid, uint64_t pd_addr)
{
u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
- amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- if (vm_id < 8) {
- amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
- } else {
- amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
- }
- amdgpu_ring_write(ring, pd_addr >> 12);
-
- /* flush TLB */
- amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
- amdgpu_ring_write(ring, 1 << vm_id);
+ amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
@@ -847,6 +862,14 @@ static void cik_sdma_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
+static void cik_sdma_ring_emit_wreg(struct amdgpu_ring *ring,
+ uint32_t reg, uint32_t val)
+{
+ amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ amdgpu_ring_write(ring, reg);
+ amdgpu_ring_write(ring, val);
+}
+
static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
bool enable)
{
@@ -896,12 +919,17 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
}
}
-static int cik_sdma_early_init(void *handle)
+static int cik_sdma_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
adev->sdma.num_instances = SDMA_MAX_INSTANCE;
+ r = cik_sdma_init_microcode(adev);
+ if (r)
+ return r;
+
cik_sdma_set_ring_funcs(adev);
cik_sdma_set_irq_funcs(adev);
cik_sdma_set_buffer_funcs(adev);
@@ -910,32 +938,26 @@ static int cik_sdma_early_init(void *handle)
return 0;
}
-static int cik_sdma_sw_init(void *handle)
+static int cik_sdma_sw_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_ring *ring;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int r, i;
- r = cik_sdma_init_microcode(adev);
- if (r) {
- DRM_ERROR("Failed to load sdma firmware!\n");
- return r;
- }
-
/* SDMA trap event */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
&adev->sdma.trap_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
/* SDMA Privileged inst */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 247,
&adev->sdma.illegal_inst_irq);
if (r)
return r;
@@ -946,9 +968,9 @@ static int cik_sdma_sw_init(void *handle)
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
- (i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ (i == 0) ? AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
@@ -956,9 +978,9 @@ static int cik_sdma_sw_init(void *handle)
return r;
}
-static int cik_sdma_sw_fini(void *handle)
+static int cik_sdma_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int i;
for (i = 0; i < adev->sdma.num_instances; i++)
@@ -968,46 +990,38 @@ static int cik_sdma_sw_fini(void *handle)
return 0;
}
-static int cik_sdma_hw_init(void *handle)
+static int cik_sdma_hw_init(struct amdgpu_ip_block *ip_block)
{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- r = cik_sdma_start(adev);
- if (r)
- return r;
+ struct amdgpu_device *adev = ip_block->adev;
- return r;
+ return cik_sdma_start(adev);
}
-static int cik_sdma_hw_fini(void *handle)
+static int cik_sdma_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
+ cik_ctx_switch_enable(adev, false);
cik_sdma_enable(adev, false);
return 0;
}
-static int cik_sdma_suspend(void *handle)
+static int cik_sdma_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cik_sdma_hw_fini(adev);
+ return cik_sdma_hw_fini(ip_block);
}
-static int cik_sdma_resume(void *handle)
+static int cik_sdma_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cik_sdma_soft_reset(ip_block);
- cik_sdma_soft_reset(handle);
-
- return cik_sdma_hw_init(adev);
+ return cik_sdma_hw_init(ip_block);
}
-static bool cik_sdma_is_idle(void *handle)
+static bool cik_sdma_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS2);
if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
@@ -1017,43 +1031,36 @@ static bool cik_sdma_is_idle(void *handle)
return true;
}
-static int cik_sdma_wait_for_idle(void *handle)
+static int cik_sdma_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
- u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
- SRBM_STATUS2__SDMA1_BUSY_MASK);
-
- if (!tmp)
+ if (cik_sdma_is_idle(ip_block))
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
-static int cik_sdma_soft_reset(void *handle)
+static int cik_sdma_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- u32 tmp = RREG32(mmSRBM_STATUS2);
+ struct amdgpu_device *adev = ip_block->adev;
+ u32 tmp;
- if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
- /* sdma0 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
- }
- if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
- /* sdma1 */
- tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
- tmp |= SDMA0_F32_CNTL__HALT_MASK;
- WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
- }
+ /* sdma0 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
+
+ /* sdma1 */
+ tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
+ tmp |= SDMA0_F32_CNTL__HALT_MASK;
+ WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
if (srbm_soft_reset) {
tmp = RREG32(mmSRBM_SOFT_RESET);
@@ -1083,7 +1090,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
- case AMDGPU_SDMA_IRQ_TRAP0:
+ case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@@ -1099,7 +1106,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_SDMA_IRQ_TRAP1:
+ case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
@@ -1166,16 +1173,19 @@ static int cik_sdma_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
+ u8 instance_id;
+
DRM_ERROR("Illegal instruction in SDMA command stream\n");
- schedule_work(&adev->reset_work);
+ instance_id = (entry->ring_id & 0x3) >> 0;
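/*
 * Report the fault on the scheduler of the offending SDMA ring; the instance
 * is decoded from the low bits of the IV ring_id.
 */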
+ drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
return 0;
}
-static int cik_sdma_set_clockgating_state(void *handle,
+static int cik_sdma_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -1186,7 +1196,7 @@ static int cik_sdma_set_clockgating_state(void *handle,
return 0;
}
-static int cik_sdma_set_powergating_state(void *handle,
+static int cik_sdma_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -1195,7 +1205,6 @@ static int cik_sdma_set_powergating_state(void *handle,
static const struct amd_ip_funcs cik_sdma_ip_funcs = {
.name = "cik_sdma",
.early_init = cik_sdma_early_init,
- .late_init = NULL,
.sw_init = cik_sdma_sw_init,
.sw_fini = cik_sdma_sw_fini,
.hw_init = cik_sdma_hw_init,
@@ -1219,9 +1228,9 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.set_wptr = cik_sdma_ring_set_wptr,
.emit_frame_size =
6 + /* cik_sdma_ring_emit_hdp_flush */
- 3 + /* cik_sdma_ring_emit_hdp_invalidate */
+ 3 + /* hdp invalidate */
6 + /* cik_sdma_ring_emit_pipeline_sync */
- 12 + /* cik_sdma_ring_emit_vm_flush */
+ CIK_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* cik_sdma_ring_emit_vm_flush */
9 + 9 + 9, /* cik_sdma_ring_emit_fence x3 for user fence, vm fence */
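/*
 * Each register write emitted via cik_sdma_ring_emit_wreg() takes 3 dwords
 * (SRBM_WRITE header, register, value), hence the
 * CIK_FLUSH_GPU_TLB_NUM_WREG * 3 term in the vm_flush estimate above.
 */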
.emit_ib_size = 7 + 4, /* cik_sdma_ring_emit_ib */
.emit_ib = cik_sdma_ring_emit_ib,
@@ -1229,19 +1238,21 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
.emit_pipeline_sync = cik_sdma_ring_emit_pipeline_sync,
.emit_vm_flush = cik_sdma_ring_emit_vm_flush,
.emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
- .emit_hdp_invalidate = cik_sdma_ring_emit_hdp_invalidate,
.test_ring = cik_sdma_ring_test_ring,
.test_ib = cik_sdma_ring_test_ib,
.insert_nop = cik_sdma_ring_insert_nop,
.pad_ib = cik_sdma_ring_pad_ib,
+ .emit_wreg = cik_sdma_ring_emit_wreg,
};
static void cik_sdma_set_ring_funcs(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.instance[i].ring.funcs = &cik_sdma_ring_funcs;
+ adev->sdma.instance[i].ring.me = i;
+ }
}
static const struct amdgpu_irq_src_funcs cik_sdma_trap_irq_funcs = {
@@ -1263,10 +1274,11 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
/**
* cik_sdma_emit_copy_buffer - copy buffer using the sDMA engine
*
- * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to copy to
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
+ * @copy_flags: unused
*
* Copy GPU buffers using the DMA engine (CIK).
* Used by the amdgpu ttm implementation to move pages if
@@ -1275,7 +1287,8 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ uint32_t copy_flags)
{
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
ib->ptr[ib->length_dw++] = byte_count;
@@ -1289,7 +1302,7 @@ static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
/**
* cik_sdma_emit_fill_buffer - fill buffer using the sDMA engine
*
- * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to fill
* @src_data: value to write to buffer
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
@@ -1320,14 +1333,14 @@ static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = {
static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
{
- if (adev->mman.buffer_funcs == NULL) {
- adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
- }
+ adev->mman.buffer_funcs = &cik_sdma_buffer_funcs;
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
.copy_pte = cik_sdma_vm_copy_pte,
+
.write_pte = cik_sdma_vm_write_pte,
.set_pte_pde = cik_sdma_vm_set_pte_pde,
};
@@ -1336,14 +1349,12 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev)
{
unsigned i;
- if (adev->vm_manager.vm_pte_funcs == NULL) {
- adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++)
- adev->vm_manager.vm_pte_rings[i] =
- &adev->sdma.instance[i].ring;
-
- adev->vm_manager.vm_pte_num_rings = adev->sdma.num_instances;
+ adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs;
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ adev->vm_manager.vm_pte_scheds[i] =
+ &adev->sdma.instance[i].ring.sched;
}
+ adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version cik_sdma_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index 6a9e38a3d2a0..8aca4f2734f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -51,10 +51,14 @@
#define HPD4_REGISTER_OFFSET (0x1813 - 0x1807)
#define HPD5_REGISTER_OFFSET (0x1816 - 0x1807)
-#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
-#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003
-
-#define AMDGPU_NUM_OF_VMIDS 8
+/* audio endpt instance offsets */
+#define AUD0_REGISTER_OFFSET (0x1780 - 0x1780)
+#define AUD1_REGISTER_OFFSET (0x1786 - 0x1780)
+#define AUD2_REGISTER_OFFSET (0x178c - 0x1780)
+#define AUD3_REGISTER_OFFSET (0x1792 - 0x1780)
+#define AUD4_REGISTER_OFFSET (0x1798 - 0x1780)
+#define AUD5_REGISTER_OFFSET (0x179d - 0x1780)
+#define AUD6_REGISTER_OFFSET (0x17a4 - 0x1780)
#define PIPEID(x) ((x) << 0)
#define MEID(x) ((x) << 2)
@@ -366,6 +370,7 @@
* 1 - Stream
* 2 - Bypass
*/
+#define EOP_EXEC (1 << 28) /* For Trailing Fence */
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
@@ -450,7 +455,7 @@
# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_AQUIRE_MEM 0x58
+#define PACKET3_ACQUIRE_MEM 0x58
#define PACKET3_REWIND 0x59
#define PACKET3_LOAD_UCONFIG_REG 0x5E
#define PACKET3_LOAD_SH_REG 0x5F
@@ -562,7 +567,7 @@
#define PRIVATE_BASE(x) ((x) << 0) /* scratch */
#define SHARED_BASE(x) ((x) << 16) /* LDS */
-#define KFD_CIK_SDMA_QUEUE_OFFSET 0x200
+#define KFD_CIK_SDMA_QUEUE_OFFSET (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL)
/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
enum {
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx10.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx10.h
new file mode 100644
index 000000000000..27a2ff0a07d2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx10.h
@@ -0,0 +1,975 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const unsigned int gfx10_SECT_CONTEXT_def_1[] = {
+ 0x00000000, // DB_RENDER_CONTROL
+ 0x00000000, // DB_COUNT_CONTROL
+ 0x00000000, // DB_DEPTH_VIEW
+ 0x00000000, // DB_RENDER_OVERRIDE
+ 0x00000000, // DB_RENDER_OVERRIDE2
+ 0x00000000, // DB_HTILE_DATA_BASE
+ 0x00000000, // HOLE
+ 0x00000000, // DB_DEPTH_SIZE_XY
+ 0x00000000, // DB_DEPTH_BOUNDS_MIN
+ 0x00000000, // DB_DEPTH_BOUNDS_MAX
+ 0x00000000, // DB_STENCIL_CLEAR
+ 0x00000000, // DB_DEPTH_CLEAR
+ 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+ 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+ 0x00000000, // DB_DFSM_CONTROL
+ 0x00000000, // DB_DEPTH_INFO
+ 0x00000000, // DB_Z_INFO
+ 0x00000000, // DB_STENCIL_INFO
+ 0x00000000, // DB_Z_READ_BASE
+ 0x00000000, // DB_STENCIL_READ_BASE
+ 0x00000000, // DB_Z_WRITE_BASE
+ 0x00000000, // DB_STENCIL_WRITE_BASE
+ 0x00000000, // DB_DEPTH_SIZE
+ 0x00000000, // DB_DEPTH_SLICE
+ 0x00000000, // DB_Z_INFO2
+ 0x00000000, // DB_STENCIL_INFO2
+ 0x00000000, // DB_Z_READ_BASE_HI
+ 0x00000000, // DB_STENCIL_READ_BASE_HI
+ 0x00000000, // DB_Z_WRITE_BASE_HI
+ 0x00000000, // DB_STENCIL_WRITE_BASE_HI
+ 0x00000000, // DB_HTILE_DATA_BASE_HI
+ 0x00150055, // DB_RMI_L2_CACHE_CONTROL
+ 0x00000000, // TA_BC_BASE_ADDR
+ 0x00000000, // TA_BC_BASE_ADDR_HI
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // COHER_DEST_BASE_HI_0
+ 0x00000000, // COHER_DEST_BASE_HI_1
+ 0x00000000, // COHER_DEST_BASE_HI_2
+ 0x00000000, // COHER_DEST_BASE_HI_3
+ 0x00000000, // COHER_DEST_BASE_2
+ 0x00000000, // COHER_DEST_BASE_3
+ 0x00000000, // PA_SC_WINDOW_OFFSET
+ 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+ 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+ 0x0000ffff, // PA_SC_CLIPRECT_RULE
+ 0x00000000, // PA_SC_CLIPRECT_0_TL
+ 0x40004000, // PA_SC_CLIPRECT_0_BR
+ 0x00000000, // PA_SC_CLIPRECT_1_TL
+ 0x40004000, // PA_SC_CLIPRECT_1_BR
+ 0x00000000, // PA_SC_CLIPRECT_2_TL
+ 0x40004000, // PA_SC_CLIPRECT_2_BR
+ 0x00000000, // PA_SC_CLIPRECT_3_TL
+ 0x40004000, // PA_SC_CLIPRECT_3_BR
+ 0xaa99aaaa, // PA_SC_EDGERULE
+ 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+ 0xffffffff, // CB_TARGET_MASK
+ 0xffffffff, // CB_SHADER_MASK
+ 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+ 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+ 0x00000000, // COHER_DEST_BASE_0
+ 0x00000000, // COHER_DEST_BASE_1
+ 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+ 0x00000000, // PA_SC_VPORT_ZMIN_0
+ 0x3f800000, // PA_SC_VPORT_ZMAX_0
+ 0x00000000, // PA_SC_VPORT_ZMIN_1
+ 0x3f800000, // PA_SC_VPORT_ZMAX_1
+ 0x00000000, // PA_SC_VPORT_ZMIN_2
+ 0x3f800000, // PA_SC_VPORT_ZMAX_2
+ 0x00000000, // PA_SC_VPORT_ZMIN_3
+ 0x3f800000, // PA_SC_VPORT_ZMAX_3
+ 0x00000000, // PA_SC_VPORT_ZMIN_4
+ 0x3f800000, // PA_SC_VPORT_ZMAX_4
+ 0x00000000, // PA_SC_VPORT_ZMIN_5
+ 0x3f800000, // PA_SC_VPORT_ZMAX_5
+ 0x00000000, // PA_SC_VPORT_ZMIN_6
+ 0x3f800000, // PA_SC_VPORT_ZMAX_6
+ 0x00000000, // PA_SC_VPORT_ZMIN_7
+ 0x3f800000, // PA_SC_VPORT_ZMAX_7
+ 0x00000000, // PA_SC_VPORT_ZMIN_8
+ 0x3f800000, // PA_SC_VPORT_ZMAX_8
+ 0x00000000, // PA_SC_VPORT_ZMIN_9
+ 0x3f800000, // PA_SC_VPORT_ZMAX_9
+ 0x00000000, // PA_SC_VPORT_ZMIN_10
+ 0x3f800000, // PA_SC_VPORT_ZMAX_10
+ 0x00000000, // PA_SC_VPORT_ZMIN_11
+ 0x3f800000, // PA_SC_VPORT_ZMAX_11
+ 0x00000000, // PA_SC_VPORT_ZMIN_12
+ 0x3f800000, // PA_SC_VPORT_ZMAX_12
+ 0x00000000, // PA_SC_VPORT_ZMIN_13
+ 0x3f800000, // PA_SC_VPORT_ZMAX_13
+ 0x00000000, // PA_SC_VPORT_ZMIN_14
+ 0x3f800000, // PA_SC_VPORT_ZMAX_14
+ 0x00000000, // PA_SC_VPORT_ZMIN_15
+ 0x3f800000, // PA_SC_VPORT_ZMAX_15
+ 0x00000000, // PA_SC_RASTER_CONFIG
+ 0x00000000, // PA_SC_RASTER_CONFIG_1
+ 0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
+};
+static const unsigned int gfx10_SECT_CONTEXT_def_2[] = {
+ 0x00000000, // CP_PERFMON_CNTX_CNTL
+ 0x00000000, // CP_RINGID
+ 0x00000000, // CP_VMID
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SC_RIGHT_VERT_GRID
+ 0x00000000, // PA_SC_LEFT_VERT_GRID
+ 0x00000000, // PA_SC_HORIZ_GRID
+ 0x00000000, // HOLE
+ 0x00000000, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0xffffffff, // VGT_MAX_VTX_INDX
+ 0x00000000, // VGT_MIN_VTX_INDX
+ 0x00000000, // VGT_INDX_OFFSET
+ 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+ 0x00550055, // CB_RMI_GL2_CACHE_CONTROL
+ 0x00000000, // CB_BLEND_RED
+ 0x00000000, // CB_BLEND_GREEN
+ 0x00000000, // CB_BLEND_BLUE
+ 0x00000000, // CB_BLEND_ALPHA
+ 0x00000000, // CB_DCC_CONTROL
+ 0x00000000, // CB_COVERAGE_OUT_CONTROL
+ 0x00000000, // DB_STENCIL_CONTROL
+ 0x01000000, // DB_STENCILREFMASK
+ 0x01000000, // DB_STENCILREFMASK_BF
+ 0, // HOLE
+ 0x00000000, // PA_CL_VPORT_XSCALE
+ 0x00000000, // PA_CL_VPORT_XOFFSET
+ 0x00000000, // PA_CL_VPORT_YSCALE
+ 0x00000000, // PA_CL_VPORT_YOFFSET
+ 0x00000000, // PA_CL_VPORT_ZSCALE
+ 0x00000000, // PA_CL_VPORT_ZOFFSET
+ 0x00000000, // PA_CL_VPORT_XSCALE_1
+ 0x00000000, // PA_CL_VPORT_XOFFSET_1
+ 0x00000000, // PA_CL_VPORT_YSCALE_1
+ 0x00000000, // PA_CL_VPORT_YOFFSET_1
+ 0x00000000, // PA_CL_VPORT_ZSCALE_1
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_1
+ 0x00000000, // PA_CL_VPORT_XSCALE_2
+ 0x00000000, // PA_CL_VPORT_XOFFSET_2
+ 0x00000000, // PA_CL_VPORT_YSCALE_2
+ 0x00000000, // PA_CL_VPORT_YOFFSET_2
+ 0x00000000, // PA_CL_VPORT_ZSCALE_2
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_2
+ 0x00000000, // PA_CL_VPORT_XSCALE_3
+ 0x00000000, // PA_CL_VPORT_XOFFSET_3
+ 0x00000000, // PA_CL_VPORT_YSCALE_3
+ 0x00000000, // PA_CL_VPORT_YOFFSET_3
+ 0x00000000, // PA_CL_VPORT_ZSCALE_3
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_3
+ 0x00000000, // PA_CL_VPORT_XSCALE_4
+ 0x00000000, // PA_CL_VPORT_XOFFSET_4
+ 0x00000000, // PA_CL_VPORT_YSCALE_4
+ 0x00000000, // PA_CL_VPORT_YOFFSET_4
+ 0x00000000, // PA_CL_VPORT_ZSCALE_4
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_4
+ 0x00000000, // PA_CL_VPORT_XSCALE_5
+ 0x00000000, // PA_CL_VPORT_XOFFSET_5
+ 0x00000000, // PA_CL_VPORT_YSCALE_5
+ 0x00000000, // PA_CL_VPORT_YOFFSET_5
+ 0x00000000, // PA_CL_VPORT_ZSCALE_5
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_5
+ 0x00000000, // PA_CL_VPORT_XSCALE_6
+ 0x00000000, // PA_CL_VPORT_XOFFSET_6
+ 0x00000000, // PA_CL_VPORT_YSCALE_6
+ 0x00000000, // PA_CL_VPORT_YOFFSET_6
+ 0x00000000, // PA_CL_VPORT_ZSCALE_6
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_6
+ 0x00000000, // PA_CL_VPORT_XSCALE_7
+ 0x00000000, // PA_CL_VPORT_XOFFSET_7
+ 0x00000000, // PA_CL_VPORT_YSCALE_7
+ 0x00000000, // PA_CL_VPORT_YOFFSET_7
+ 0x00000000, // PA_CL_VPORT_ZSCALE_7
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_7
+ 0x00000000, // PA_CL_VPORT_XSCALE_8
+ 0x00000000, // PA_CL_VPORT_XOFFSET_8
+ 0x00000000, // PA_CL_VPORT_YSCALE_8
+ 0x00000000, // PA_CL_VPORT_YOFFSET_8
+ 0x00000000, // PA_CL_VPORT_ZSCALE_8
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_8
+ 0x00000000, // PA_CL_VPORT_XSCALE_9
+ 0x00000000, // PA_CL_VPORT_XOFFSET_9
+ 0x00000000, // PA_CL_VPORT_YSCALE_9
+ 0x00000000, // PA_CL_VPORT_YOFFSET_9
+ 0x00000000, // PA_CL_VPORT_ZSCALE_9
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_9
+ 0x00000000, // PA_CL_VPORT_XSCALE_10
+ 0x00000000, // PA_CL_VPORT_XOFFSET_10
+ 0x00000000, // PA_CL_VPORT_YSCALE_10
+ 0x00000000, // PA_CL_VPORT_YOFFSET_10
+ 0x00000000, // PA_CL_VPORT_ZSCALE_10
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_10
+ 0x00000000, // PA_CL_VPORT_XSCALE_11
+ 0x00000000, // PA_CL_VPORT_XOFFSET_11
+ 0x00000000, // PA_CL_VPORT_YSCALE_11
+ 0x00000000, // PA_CL_VPORT_YOFFSET_11
+ 0x00000000, // PA_CL_VPORT_ZSCALE_11
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_11
+ 0x00000000, // PA_CL_VPORT_XSCALE_12
+ 0x00000000, // PA_CL_VPORT_XOFFSET_12
+ 0x00000000, // PA_CL_VPORT_YSCALE_12
+ 0x00000000, // PA_CL_VPORT_YOFFSET_12
+ 0x00000000, // PA_CL_VPORT_ZSCALE_12
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_12
+ 0x00000000, // PA_CL_VPORT_XSCALE_13
+ 0x00000000, // PA_CL_VPORT_XOFFSET_13
+ 0x00000000, // PA_CL_VPORT_YSCALE_13
+ 0x00000000, // PA_CL_VPORT_YOFFSET_13
+ 0x00000000, // PA_CL_VPORT_ZSCALE_13
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_13
+ 0x00000000, // PA_CL_VPORT_XSCALE_14
+ 0x00000000, // PA_CL_VPORT_XOFFSET_14
+ 0x00000000, // PA_CL_VPORT_YSCALE_14
+ 0x00000000, // PA_CL_VPORT_YOFFSET_14
+ 0x00000000, // PA_CL_VPORT_ZSCALE_14
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_14
+ 0x00000000, // PA_CL_VPORT_XSCALE_15
+ 0x00000000, // PA_CL_VPORT_XOFFSET_15
+ 0x00000000, // PA_CL_VPORT_YSCALE_15
+ 0x00000000, // PA_CL_VPORT_YOFFSET_15
+ 0x00000000, // PA_CL_VPORT_ZSCALE_15
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_15
+ 0x00000000, // PA_CL_UCP_0_X
+ 0x00000000, // PA_CL_UCP_0_Y
+ 0x00000000, // PA_CL_UCP_0_Z
+ 0x00000000, // PA_CL_UCP_0_W
+ 0x00000000, // PA_CL_UCP_1_X
+ 0x00000000, // PA_CL_UCP_1_Y
+ 0x00000000, // PA_CL_UCP_1_Z
+ 0x00000000, // PA_CL_UCP_1_W
+ 0x00000000, // PA_CL_UCP_2_X
+ 0x00000000, // PA_CL_UCP_2_Y
+ 0x00000000, // PA_CL_UCP_2_Z
+ 0x00000000, // PA_CL_UCP_2_W
+ 0x00000000, // PA_CL_UCP_3_X
+ 0x00000000, // PA_CL_UCP_3_Y
+ 0x00000000, // PA_CL_UCP_3_Z
+ 0x00000000, // PA_CL_UCP_3_W
+ 0x00000000, // PA_CL_UCP_4_X
+ 0x00000000, // PA_CL_UCP_4_Y
+ 0x00000000, // PA_CL_UCP_4_Z
+ 0x00000000, // PA_CL_UCP_4_W
+ 0x00000000, // PA_CL_UCP_5_X
+ 0x00000000, // PA_CL_UCP_5_Y
+ 0x00000000, // PA_CL_UCP_5_Z
+ 0x00000000, // PA_CL_UCP_5_W
+ 0x00000000, // PA_CL_PROG_NEAR_CLIP_Z
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_CNTL_0
+ 0x00000000, // SPI_PS_INPUT_CNTL_1
+ 0x00000000, // SPI_PS_INPUT_CNTL_2
+ 0x00000000, // SPI_PS_INPUT_CNTL_3
+ 0x00000000, // SPI_PS_INPUT_CNTL_4
+ 0x00000000, // SPI_PS_INPUT_CNTL_5
+ 0x00000000, // SPI_PS_INPUT_CNTL_6
+ 0x00000000, // SPI_PS_INPUT_CNTL_7
+ 0x00000000, // SPI_PS_INPUT_CNTL_8
+ 0x00000000, // SPI_PS_INPUT_CNTL_9
+ 0x00000000, // SPI_PS_INPUT_CNTL_10
+ 0x00000000, // SPI_PS_INPUT_CNTL_11
+ 0x00000000, // SPI_PS_INPUT_CNTL_12
+ 0x00000000, // SPI_PS_INPUT_CNTL_13
+ 0x00000000, // SPI_PS_INPUT_CNTL_14
+ 0x00000000, // SPI_PS_INPUT_CNTL_15
+ 0x00000000, // SPI_PS_INPUT_CNTL_16
+ 0x00000000, // SPI_PS_INPUT_CNTL_17
+ 0x00000000, // SPI_PS_INPUT_CNTL_18
+ 0x00000000, // SPI_PS_INPUT_CNTL_19
+ 0x00000000, // SPI_PS_INPUT_CNTL_20
+ 0x00000000, // SPI_PS_INPUT_CNTL_21
+ 0x00000000, // SPI_PS_INPUT_CNTL_22
+ 0x00000000, // SPI_PS_INPUT_CNTL_23
+ 0x00000000, // SPI_PS_INPUT_CNTL_24
+ 0x00000000, // SPI_PS_INPUT_CNTL_25
+ 0x00000000, // SPI_PS_INPUT_CNTL_26
+ 0x00000000, // SPI_PS_INPUT_CNTL_27
+ 0x00000000, // SPI_PS_INPUT_CNTL_28
+ 0x00000000, // SPI_PS_INPUT_CNTL_29
+ 0x00000000, // SPI_PS_INPUT_CNTL_30
+ 0x00000000, // SPI_PS_INPUT_CNTL_31
+ 0x00000000, // SPI_VS_OUT_CONFIG
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_ENA
+ 0x00000000, // SPI_PS_INPUT_ADDR
+ 0x00000000, // SPI_INTERP_CONTROL_0
+ 0x00000002, // SPI_PS_IN_CONTROL
+ 0, // HOLE
+ 0x00000000, // SPI_BARYC_CNTL
+ 0, // HOLE
+ 0x00000000, // SPI_TMPRING_SIZE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_SHADER_IDX_FORMAT
+ 0x00000000, // SPI_SHADER_POS_FORMAT
+ 0x00000000, // SPI_SHADER_Z_FORMAT
+ 0x00000000, // SPI_SHADER_COL_FORMAT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SX_PS_DOWNCONVERT
+ 0x00000000, // SX_BLEND_OPT_EPSILON
+ 0x00000000, // SX_BLEND_OPT_CONTROL
+ 0x00000000, // SX_MRT0_BLEND_OPT
+ 0x00000000, // SX_MRT1_BLEND_OPT
+ 0x00000000, // SX_MRT2_BLEND_OPT
+ 0x00000000, // SX_MRT3_BLEND_OPT
+ 0x00000000, // SX_MRT4_BLEND_OPT
+ 0x00000000, // SX_MRT5_BLEND_OPT
+ 0x00000000, // SX_MRT6_BLEND_OPT
+ 0x00000000, // SX_MRT7_BLEND_OPT
+ 0x00000000, // CB_BLEND0_CONTROL
+ 0x00000000, // CB_BLEND1_CONTROL
+ 0x00000000, // CB_BLEND2_CONTROL
+ 0x00000000, // CB_BLEND3_CONTROL
+ 0x00000000, // CB_BLEND4_CONTROL
+ 0x00000000, // CB_BLEND5_CONTROL
+ 0x00000000, // CB_BLEND6_CONTROL
+ 0x00000000, // CB_BLEND7_CONTROL
+};
+static const unsigned int gfx10_SECT_CONTEXT_def_3[] = {
+ 0x00000000, // PA_CL_POINT_X_RAD
+ 0x00000000, // PA_CL_POINT_Y_RAD
+ 0x00000000, // PA_CL_POINT_SIZE
+ 0x00000000, // PA_CL_POINT_CULL_RAD
+};
+static const unsigned int gfx10_SECT_CONTEXT_def_4[] = {
+ 0x00000000, // VGT_GS_MAX_PRIMS_PER_SUBGROUP
+ 0x00000000, // DB_DEPTH_CONTROL
+ 0x00000000, // DB_EQAA
+ 0x00000000, // CB_COLOR_CONTROL
+ 0x00000000, // DB_SHADER_CONTROL
+ 0x00090000, // PA_CL_CLIP_CNTL
+ 0x00000004, // PA_SU_SC_MODE_CNTL
+ 0x00000000, // PA_CL_VTE_CNTL
+ 0x00000000, // PA_CL_VS_OUT_CNTL
+ 0x00000000, // PA_CL_NANINF_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+ 0x00000000, // PA_SU_PRIM_FILTER_CNTL
+ 0x00000000, // PA_SU_SMALL_PRIM_FILTER_CNTL
+ 0x00000000, // PA_CL_OBJPRIM_ID_CNTL
+ 0x00000000, // PA_CL_NGG_CNTL
+ 0x00000000, // PA_SU_OVER_RASTERIZATION_CNTL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SU_POINT_SIZE
+ 0x00000000, // PA_SU_POINT_MINMAX
+ 0x00000000, // PA_SU_LINE_CNTL
+ 0x00000000, // PA_SC_LINE_STIPPLE
+ 0x00000000, // VGT_OUTPUT_PATH_CNTL
+ 0x00000000, // VGT_HOS_CNTL
+ 0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+ 0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+ 0x00000000, // VGT_HOS_REUSE_DEPTH
+ 0x00000000, // VGT_GROUP_PRIM_TYPE
+ 0x00000000, // VGT_GROUP_FIRST_DECR
+ 0x00000000, // VGT_GROUP_DECR
+ 0x00000000, // VGT_GROUP_VECT_0_CNTL
+ 0x00000000, // VGT_GROUP_VECT_1_CNTL
+ 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+ 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+ 0x00000000, // VGT_GS_MODE
+ 0x00000000, // VGT_GS_ONCHIP_CNTL
+ 0x00000000, // PA_SC_MODE_CNTL_0
+ 0x00000000, // PA_SC_MODE_CNTL_1
+ 0x00000000, // VGT_ENHANCE
+ 0x00000100, // VGT_GS_PER_ES
+ 0x00000080, // VGT_ES_PER_GS
+ 0x00000002, // VGT_GS_PER_VS
+ 0x00000000, // VGT_GSVS_RING_OFFSET_1
+ 0x00000000, // VGT_GSVS_RING_OFFSET_2
+ 0x00000000, // VGT_GSVS_RING_OFFSET_3
+ 0x00000000, // VGT_GS_OUT_PRIM_TYPE
+ 0x00000000, // IA_ENHANCE
+};
+static const unsigned int gfx10_SECT_CONTEXT_def_5[] = {
+ 0x00000000, // WD_ENHANCE
+ 0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const unsigned int gfx10_SECT_CONTEXT_def_6[] = {
+ 0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const unsigned int gfx10_SECT_CONTEXT_def_7[] = {
+ 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+ 0x00000000, // VGT_DRAW_PAYLOAD_CNTL
+ 0x00000000, // HOLE
+ 0x00000000, // VGT_INSTANCE_STEP_RATE_0
+ 0x00000000, // VGT_INSTANCE_STEP_RATE_1
+ 0x000000ff, // IA_MULTI_VGT_PARAM
+ 0x00000000, // VGT_ESGS_RING_ITEMSIZE
+ 0x00000000, // VGT_GSVS_RING_ITEMSIZE
+ 0x00000000, // VGT_REUSE_OFF
+ 0x00000000, // VGT_VTX_CNT_EN
+ 0x00000000, // DB_HTILE_SURFACE
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE0
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE1
+ 0x00000000, // DB_PRELOAD_CONTROL
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+ 0, // HOLE
+ 0x00000000, // VGT_GS_MAX_VERT_OUT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_TESS_DISTRIBUTION
+ 0x00000000, // VGT_SHADER_STAGES_EN
+ 0x00000000, // VGT_LS_HS_CONFIG
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+ 0x00000000, // VGT_TF_PARAM
+ 0x00000000, // DB_ALPHA_TO_MASK
+ 0x00000000, // VGT_DISPATCH_DRAW_INDEX
+ 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+ 0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+ 0x00000000, // VGT_GS_INSTANCE_CNT
+ 0x00000000, // VGT_STRMOUT_CONFIG
+ 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+};
+static const unsigned int gfx10_SECT_CONTEXT_def_8[] = {
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_0
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_1
+ 0x00001000, // PA_SC_LINE_CNTL
+ 0x00000000, // PA_SC_AA_CONFIG
+ 0x00000005, // PA_SU_VTX_CNTL
+ 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+ 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+ 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+ 0x00000000, // PA_SC_SHADER_CONTROL
+ 0x00000003, // PA_SC_BINNER_CNTL_0
+ 0x00000000, // PA_SC_BINNER_CNTL_1
+ 0x00100000, // PA_SC_CONSERVATIVE_RASTERIZATION_CNTL
+ 0x00000000, // PA_SC_NGG_MODE_CNTL
+ 0, // HOLE
+ 0x0000001e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+ 0x00000020, // VGT_OUT_DEALLOC_CNTL
+ 0x00000000, // CB_COLOR0_BASE
+ 0x00000000, // CB_COLOR0_PITCH
+ 0x00000000, // CB_COLOR0_SLICE
+ 0x00000000, // CB_COLOR0_VIEW
+ 0x00000000, // CB_COLOR0_INFO
+ 0x00000000, // CB_COLOR0_ATTRIB
+ 0x00000000, // CB_COLOR0_DCC_CONTROL
+ 0x00000000, // CB_COLOR0_CMASK
+ 0x00000000, // CB_COLOR0_CMASK_SLICE
+ 0x00000000, // CB_COLOR0_FMASK
+ 0x00000000, // CB_COLOR0_FMASK_SLICE
+ 0x00000000, // CB_COLOR0_CLEAR_WORD0
+ 0x00000000, // CB_COLOR0_CLEAR_WORD1
+ 0x00000000, // CB_COLOR0_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_BASE
+ 0x00000000, // CB_COLOR1_PITCH
+ 0x00000000, // CB_COLOR1_SLICE
+ 0x00000000, // CB_COLOR1_VIEW
+ 0x00000000, // CB_COLOR1_INFO
+ 0x00000000, // CB_COLOR1_ATTRIB
+ 0x00000000, // CB_COLOR1_DCC_CONTROL
+ 0x00000000, // CB_COLOR1_CMASK
+ 0x00000000, // CB_COLOR1_CMASK_SLICE
+ 0x00000000, // CB_COLOR1_FMASK
+ 0x00000000, // CB_COLOR1_FMASK_SLICE
+ 0x00000000, // CB_COLOR1_CLEAR_WORD0
+ 0x00000000, // CB_COLOR1_CLEAR_WORD1
+ 0x00000000, // CB_COLOR1_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_BASE
+ 0x00000000, // CB_COLOR2_PITCH
+ 0x00000000, // CB_COLOR2_SLICE
+ 0x00000000, // CB_COLOR2_VIEW
+ 0x00000000, // CB_COLOR2_INFO
+ 0x00000000, // CB_COLOR2_ATTRIB
+ 0x00000000, // CB_COLOR2_DCC_CONTROL
+ 0x00000000, // CB_COLOR2_CMASK
+ 0x00000000, // CB_COLOR2_CMASK_SLICE
+ 0x00000000, // CB_COLOR2_FMASK
+ 0x00000000, // CB_COLOR2_FMASK_SLICE
+ 0x00000000, // CB_COLOR2_CLEAR_WORD0
+ 0x00000000, // CB_COLOR2_CLEAR_WORD1
+ 0x00000000, // CB_COLOR2_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_BASE
+ 0x00000000, // CB_COLOR3_PITCH
+ 0x00000000, // CB_COLOR3_SLICE
+ 0x00000000, // CB_COLOR3_VIEW
+ 0x00000000, // CB_COLOR3_INFO
+ 0x00000000, // CB_COLOR3_ATTRIB
+ 0x00000000, // CB_COLOR3_DCC_CONTROL
+ 0x00000000, // CB_COLOR3_CMASK
+ 0x00000000, // CB_COLOR3_CMASK_SLICE
+ 0x00000000, // CB_COLOR3_FMASK
+ 0x00000000, // CB_COLOR3_FMASK_SLICE
+ 0x00000000, // CB_COLOR3_CLEAR_WORD0
+ 0x00000000, // CB_COLOR3_CLEAR_WORD1
+ 0x00000000, // CB_COLOR3_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_BASE
+ 0x00000000, // CB_COLOR4_PITCH
+ 0x00000000, // CB_COLOR4_SLICE
+ 0x00000000, // CB_COLOR4_VIEW
+ 0x00000000, // CB_COLOR4_INFO
+ 0x00000000, // CB_COLOR4_ATTRIB
+ 0x00000000, // CB_COLOR4_DCC_CONTROL
+ 0x00000000, // CB_COLOR4_CMASK
+ 0x00000000, // CB_COLOR4_CMASK_SLICE
+ 0x00000000, // CB_COLOR4_FMASK
+ 0x00000000, // CB_COLOR4_FMASK_SLICE
+ 0x00000000, // CB_COLOR4_CLEAR_WORD0
+ 0x00000000, // CB_COLOR4_CLEAR_WORD1
+ 0x00000000, // CB_COLOR4_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_BASE
+ 0x00000000, // CB_COLOR5_PITCH
+ 0x00000000, // CB_COLOR5_SLICE
+ 0x00000000, // CB_COLOR5_VIEW
+ 0x00000000, // CB_COLOR5_INFO
+ 0x00000000, // CB_COLOR5_ATTRIB
+ 0x00000000, // CB_COLOR5_DCC_CONTROL
+ 0x00000000, // CB_COLOR5_CMASK
+ 0x00000000, // CB_COLOR5_CMASK_SLICE
+ 0x00000000, // CB_COLOR5_FMASK
+ 0x00000000, // CB_COLOR5_FMASK_SLICE
+ 0x00000000, // CB_COLOR5_CLEAR_WORD0
+ 0x00000000, // CB_COLOR5_CLEAR_WORD1
+ 0x00000000, // CB_COLOR5_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_BASE
+ 0x00000000, // CB_COLOR6_PITCH
+ 0x00000000, // CB_COLOR6_SLICE
+ 0x00000000, // CB_COLOR6_VIEW
+ 0x00000000, // CB_COLOR6_INFO
+ 0x00000000, // CB_COLOR6_ATTRIB
+ 0x00000000, // CB_COLOR6_DCC_CONTROL
+ 0x00000000, // CB_COLOR6_CMASK
+ 0x00000000, // CB_COLOR6_CMASK_SLICE
+ 0x00000000, // CB_COLOR6_FMASK
+ 0x00000000, // CB_COLOR6_FMASK_SLICE
+ 0x00000000, // CB_COLOR6_CLEAR_WORD0
+ 0x00000000, // CB_COLOR6_CLEAR_WORD1
+ 0x00000000, // CB_COLOR6_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_BASE
+ 0x00000000, // CB_COLOR7_PITCH
+ 0x00000000, // CB_COLOR7_SLICE
+ 0x00000000, // CB_COLOR7_VIEW
+ 0x00000000, // CB_COLOR7_INFO
+ 0x00000000, // CB_COLOR7_ATTRIB
+ 0x00000000, // CB_COLOR7_DCC_CONTROL
+ 0x00000000, // CB_COLOR7_CMASK
+ 0x00000000, // CB_COLOR7_CMASK_SLICE
+ 0x00000000, // CB_COLOR7_FMASK
+ 0x00000000, // CB_COLOR7_FMASK_SLICE
+ 0x00000000, // CB_COLOR7_CLEAR_WORD0
+ 0x00000000, // CB_COLOR7_CLEAR_WORD1
+ 0x00000000, // CB_COLOR7_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR0_BASE_EXT
+ 0x00000000, // CB_COLOR1_BASE_EXT
+ 0x00000000, // CB_COLOR2_BASE_EXT
+ 0x00000000, // CB_COLOR3_BASE_EXT
+ 0x00000000, // CB_COLOR4_BASE_EXT
+ 0x00000000, // CB_COLOR5_BASE_EXT
+ 0x00000000, // CB_COLOR6_BASE_EXT
+ 0x00000000, // CB_COLOR7_BASE_EXT
+ 0x00000000, // CB_COLOR0_CMASK_BASE_EXT
+ 0x00000000, // CB_COLOR1_CMASK_BASE_EXT
+ 0x00000000, // CB_COLOR2_CMASK_BASE_EXT
+ 0x00000000, // CB_COLOR3_CMASK_BASE_EXT
+ 0x00000000, // CB_COLOR4_CMASK_BASE_EXT
+ 0x00000000, // CB_COLOR5_CMASK_BASE_EXT
+ 0x00000000, // CB_COLOR6_CMASK_BASE_EXT
+ 0x00000000, // CB_COLOR7_CMASK_BASE_EXT
+ 0x00000000, // CB_COLOR0_FMASK_BASE_EXT
+ 0x00000000, // CB_COLOR1_FMASK_BASE_EXT
+ 0x00000000, // CB_COLOR2_FMASK_BASE_EXT
+ 0x00000000, // CB_COLOR3_FMASK_BASE_EXT
+ 0x00000000, // CB_COLOR4_FMASK_BASE_EXT
+ 0x00000000, // CB_COLOR5_FMASK_BASE_EXT
+ 0x00000000, // CB_COLOR6_FMASK_BASE_EXT
+ 0x00000000, // CB_COLOR7_FMASK_BASE_EXT
+ 0x00000000, // CB_COLOR0_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR1_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR2_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR3_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR4_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR5_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR6_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR7_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR0_ATTRIB2
+ 0x00000000, // CB_COLOR1_ATTRIB2
+ 0x00000000, // CB_COLOR2_ATTRIB2
+ 0x00000000, // CB_COLOR3_ATTRIB2
+ 0x00000000, // CB_COLOR4_ATTRIB2
+ 0x00000000, // CB_COLOR5_ATTRIB2
+ 0x00000000, // CB_COLOR6_ATTRIB2
+ 0x00000000, // CB_COLOR7_ATTRIB2
+ 0x00000000, // CB_COLOR0_ATTRIB3
+ 0x00000000, // CB_COLOR1_ATTRIB3
+ 0x00000000, // CB_COLOR2_ATTRIB3
+ 0x00000000, // CB_COLOR3_ATTRIB3
+ 0x00000000, // CB_COLOR4_ATTRIB3
+ 0x00000000, // CB_COLOR5_ATTRIB3
+ 0x00000000, // CB_COLOR6_ATTRIB3
+ 0x00000000, // CB_COLOR7_ATTRIB3
+};
+static const struct cs_extent_def gfx10_SECT_CONTEXT_defs[] = {
+ {gfx10_SECT_CONTEXT_def_1, 0x0000a000, 215 },
+ {gfx10_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
+ {gfx10_SECT_CONTEXT_def_3, 0x0000a1f5, 4 },
+ {gfx10_SECT_CONTEXT_def_4, 0x0000a1ff, 158 },
+ {gfx10_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
+ {gfx10_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+ {gfx10_SECT_CONTEXT_def_7, 0x0000a2a5, 66 },
+ {gfx10_SECT_CONTEXT_def_8, 0x0000a2f5, 203 },
+ { 0, 0, 0 }
+};
+static const struct cs_section_def gfx10_cs_data[] = {
+ { gfx10_SECT_CONTEXT_defs, SECT_CONTEXT },
+ { 0, SECT_NONE }
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx11.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx11.h
new file mode 100644
index 000000000000..a8b29d33c464
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx11.h
@@ -0,0 +1,997 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CLEARSTATE_GFX11_H_
+#define __CLEARSTATE_GFX11_H_
+
+static const unsigned int gfx11_SECT_CONTEXT_def_1[] =
+{
+ 0x00000000, // DB_RENDER_CONTROL
+ 0x00000000, // DB_COUNT_CONTROL
+ 0x00000000, // DB_DEPTH_VIEW
+ 0x00000000, // DB_RENDER_OVERRIDE
+ 0x00000000, // DB_RENDER_OVERRIDE2
+ 0x00000000, // DB_HTILE_DATA_BASE
+ 0, // HOLE
+ 0x00000000, // DB_DEPTH_SIZE_XY
+ 0x00000000, // DB_DEPTH_BOUNDS_MIN
+ 0x00000000, // DB_DEPTH_BOUNDS_MAX
+ 0x00000000, // DB_STENCIL_CLEAR
+ 0x00000000, // DB_DEPTH_CLEAR
+ 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+ 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+ 0, // HOLE
+ 0x00000000, // DB_RESERVED_REG_2
+ 0x00000000, // DB_Z_INFO
+ 0x00000000, // DB_STENCIL_INFO
+ 0x00000000, // DB_Z_READ_BASE
+ 0x00000000, // DB_STENCIL_READ_BASE
+ 0x00000000, // DB_Z_WRITE_BASE
+ 0x00000000, // DB_STENCIL_WRITE_BASE
+ 0x00000000, // DB_RESERVED_REG_1
+ 0x00000000, // DB_RESERVED_REG_3
+ 0x00000000, // DB_SPI_VRS_CENTER_LOCATION
+ 0, // HOLE
+ 0x00000000, // DB_Z_READ_BASE_HI
+ 0x00000000, // DB_STENCIL_READ_BASE_HI
+ 0x00000000, // DB_Z_WRITE_BASE_HI
+ 0x00000000, // DB_STENCIL_WRITE_BASE_HI
+ 0x00000000, // DB_HTILE_DATA_BASE_HI
+ 0x00150055, // DB_RMI_L2_CACHE_CONTROL
+ 0x00000000, // TA_BC_BASE_ADDR
+ 0x00000000, // TA_BC_BASE_ADDR_HI
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // COHER_DEST_BASE_HI_0
+ 0x00000000, // COHER_DEST_BASE_HI_1
+ 0x00000000, // COHER_DEST_BASE_HI_2
+ 0x00000000, // COHER_DEST_BASE_HI_3
+ 0x00000000, // COHER_DEST_BASE_2
+ 0x00000000, // COHER_DEST_BASE_3
+ 0x00000000, // PA_SC_WINDOW_OFFSET
+ 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+ 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+ 0x0000ffff, // PA_SC_CLIPRECT_RULE
+ 0x00000000, // PA_SC_CLIPRECT_0_TL
+ 0x40004000, // PA_SC_CLIPRECT_0_BR
+ 0x00000000, // PA_SC_CLIPRECT_1_TL
+ 0x40004000, // PA_SC_CLIPRECT_1_BR
+ 0x00000000, // PA_SC_CLIPRECT_2_TL
+ 0x40004000, // PA_SC_CLIPRECT_2_BR
+ 0x00000000, // PA_SC_CLIPRECT_3_TL
+ 0x40004000, // PA_SC_CLIPRECT_3_BR
+ 0xaa99aaaa, // PA_SC_EDGERULE
+ 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+ 0xffffffff, // CB_TARGET_MASK
+ 0xffffffff, // CB_SHADER_MASK
+ 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+ 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+ 0x00000000, // COHER_DEST_BASE_0
+ 0x00000000, // COHER_DEST_BASE_1
+ 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+ 0x00000000, // PA_SC_VPORT_ZMIN_0
+ 0x3f800000, // PA_SC_VPORT_ZMAX_0
+ 0x00000000, // PA_SC_VPORT_ZMIN_1
+ 0x3f800000, // PA_SC_VPORT_ZMAX_1
+ 0x00000000, // PA_SC_VPORT_ZMIN_2
+ 0x3f800000, // PA_SC_VPORT_ZMAX_2
+ 0x00000000, // PA_SC_VPORT_ZMIN_3
+ 0x3f800000, // PA_SC_VPORT_ZMAX_3
+ 0x00000000, // PA_SC_VPORT_ZMIN_4
+ 0x3f800000, // PA_SC_VPORT_ZMAX_4
+ 0x00000000, // PA_SC_VPORT_ZMIN_5
+ 0x3f800000, // PA_SC_VPORT_ZMAX_5
+ 0x00000000, // PA_SC_VPORT_ZMIN_6
+ 0x3f800000, // PA_SC_VPORT_ZMAX_6
+ 0x00000000, // PA_SC_VPORT_ZMIN_7
+ 0x3f800000, // PA_SC_VPORT_ZMAX_7
+ 0x00000000, // PA_SC_VPORT_ZMIN_8
+ 0x3f800000, // PA_SC_VPORT_ZMAX_8
+ 0x00000000, // PA_SC_VPORT_ZMIN_9
+ 0x3f800000, // PA_SC_VPORT_ZMAX_9
+ 0x00000000, // PA_SC_VPORT_ZMIN_10
+ 0x3f800000, // PA_SC_VPORT_ZMAX_10
+ 0x00000000, // PA_SC_VPORT_ZMIN_11
+ 0x3f800000, // PA_SC_VPORT_ZMAX_11
+ 0x00000000, // PA_SC_VPORT_ZMIN_12
+ 0x3f800000, // PA_SC_VPORT_ZMAX_12
+ 0x00000000, // PA_SC_VPORT_ZMIN_13
+ 0x3f800000, // PA_SC_VPORT_ZMAX_13
+ 0x00000000, // PA_SC_VPORT_ZMIN_14
+ 0x3f800000, // PA_SC_VPORT_ZMAX_14
+ 0x00000000, // PA_SC_VPORT_ZMIN_15
+ 0x3f800000, // PA_SC_VPORT_ZMAX_15
+ 0x00000000, // PA_SC_RASTER_CONFIG
+ 0x00000000, // PA_SC_RASTER_CONFIG_1
+ 0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
+};
+static const unsigned int gfx11_SECT_CONTEXT_def_2[] =
+{
+ 0x00000000, // CP_PERFMON_CNTX_CNTL
+ 0x00000000, // CP_PIPEID
+ 0x00000000, // CP_VMID
+ 0x00000000, // CONTEXT_RESERVED_REG0
+ 0x00000000, // CONTEXT_RESERVED_REG1
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SC_FSR_EN
+ 0x00000000, // PA_SC_FSR_FBW_RECURSIONS_X
+ 0x00000000, // PA_SC_FSR_FBW_RECURSIONS_Y
+ 0, // HOLE
+ 0x00000000, // PA_SC_VRS_OVERRIDE_CNTL
+ 0x00000000, // PA_SC_VRS_RATE_FEEDBACK_BASE
+ 0x00000000, // PA_SC_VRS_RATE_FEEDBACK_BASE_EXT
+ 0x00000000, // PA_SC_VRS_RATE_FEEDBACK_SIZE_XY
+ 0x00000000, // PA_SC_BINNER_OUTPUT_TIMEOUT_CNTL
+ 0x00000000, // PA_SC_VRS_RATE_CACHE_CNTL
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SC_VRS_RATE_BASE
+ 0x00000000, // PA_SC_VRS_RATE_BASE_EXT
+ 0x00000000, // PA_SC_VRS_RATE_SIZE_XY
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+ 0x00550055, // CB_RMI_GL2_CACHE_CONTROL
+ 0x00000000, // CB_BLEND_RED
+ 0x00000000, // CB_BLEND_GREEN
+ 0x00000000, // CB_BLEND_BLUE
+ 0x00000000, // CB_BLEND_ALPHA
+ 0x00000000, // CB_FDCC_CONTROL
+ 0x00000000, // CB_COVERAGE_OUT_CONTROL
+ 0x00000000, // DB_STENCIL_CONTROL
+ 0x01000000, // DB_STENCILREFMASK
+ 0x01000000, // DB_STENCILREFMASK_BF
+ 0, // HOLE
+ 0x00000000, // PA_CL_VPORT_XSCALE
+ 0x00000000, // PA_CL_VPORT_XOFFSET
+ 0x00000000, // PA_CL_VPORT_YSCALE
+ 0x00000000, // PA_CL_VPORT_YOFFSET
+ 0x00000000, // PA_CL_VPORT_ZSCALE
+ 0x00000000, // PA_CL_VPORT_ZOFFSET
+ 0x00000000, // PA_CL_VPORT_XSCALE_1
+ 0x00000000, // PA_CL_VPORT_XOFFSET_1
+ 0x00000000, // PA_CL_VPORT_YSCALE_1
+ 0x00000000, // PA_CL_VPORT_YOFFSET_1
+ 0x00000000, // PA_CL_VPORT_ZSCALE_1
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_1
+ 0x00000000, // PA_CL_VPORT_XSCALE_2
+ 0x00000000, // PA_CL_VPORT_XOFFSET_2
+ 0x00000000, // PA_CL_VPORT_YSCALE_2
+ 0x00000000, // PA_CL_VPORT_YOFFSET_2
+ 0x00000000, // PA_CL_VPORT_ZSCALE_2
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_2
+ 0x00000000, // PA_CL_VPORT_XSCALE_3
+ 0x00000000, // PA_CL_VPORT_XOFFSET_3
+ 0x00000000, // PA_CL_VPORT_YSCALE_3
+ 0x00000000, // PA_CL_VPORT_YOFFSET_3
+ 0x00000000, // PA_CL_VPORT_ZSCALE_3
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_3
+ 0x00000000, // PA_CL_VPORT_XSCALE_4
+ 0x00000000, // PA_CL_VPORT_XOFFSET_4
+ 0x00000000, // PA_CL_VPORT_YSCALE_4
+ 0x00000000, // PA_CL_VPORT_YOFFSET_4
+ 0x00000000, // PA_CL_VPORT_ZSCALE_4
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_4
+ 0x00000000, // PA_CL_VPORT_XSCALE_5
+ 0x00000000, // PA_CL_VPORT_XOFFSET_5
+ 0x00000000, // PA_CL_VPORT_YSCALE_5
+ 0x00000000, // PA_CL_VPORT_YOFFSET_5
+ 0x00000000, // PA_CL_VPORT_ZSCALE_5
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_5
+ 0x00000000, // PA_CL_VPORT_XSCALE_6
+ 0x00000000, // PA_CL_VPORT_XOFFSET_6
+ 0x00000000, // PA_CL_VPORT_YSCALE_6
+ 0x00000000, // PA_CL_VPORT_YOFFSET_6
+ 0x00000000, // PA_CL_VPORT_ZSCALE_6
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_6
+ 0x00000000, // PA_CL_VPORT_XSCALE_7
+ 0x00000000, // PA_CL_VPORT_XOFFSET_7
+ 0x00000000, // PA_CL_VPORT_YSCALE_7
+ 0x00000000, // PA_CL_VPORT_YOFFSET_7
+ 0x00000000, // PA_CL_VPORT_ZSCALE_7
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_7
+ 0x00000000, // PA_CL_VPORT_XSCALE_8
+ 0x00000000, // PA_CL_VPORT_XOFFSET_8
+ 0x00000000, // PA_CL_VPORT_YSCALE_8
+ 0x00000000, // PA_CL_VPORT_YOFFSET_8
+ 0x00000000, // PA_CL_VPORT_ZSCALE_8
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_8
+ 0x00000000, // PA_CL_VPORT_XSCALE_9
+ 0x00000000, // PA_CL_VPORT_XOFFSET_9
+ 0x00000000, // PA_CL_VPORT_YSCALE_9
+ 0x00000000, // PA_CL_VPORT_YOFFSET_9
+ 0x00000000, // PA_CL_VPORT_ZSCALE_9
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_9
+ 0x00000000, // PA_CL_VPORT_XSCALE_10
+ 0x00000000, // PA_CL_VPORT_XOFFSET_10
+ 0x00000000, // PA_CL_VPORT_YSCALE_10
+ 0x00000000, // PA_CL_VPORT_YOFFSET_10
+ 0x00000000, // PA_CL_VPORT_ZSCALE_10
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_10
+ 0x00000000, // PA_CL_VPORT_XSCALE_11
+ 0x00000000, // PA_CL_VPORT_XOFFSET_11
+ 0x00000000, // PA_CL_VPORT_YSCALE_11
+ 0x00000000, // PA_CL_VPORT_YOFFSET_11
+ 0x00000000, // PA_CL_VPORT_ZSCALE_11
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_11
+ 0x00000000, // PA_CL_VPORT_XSCALE_12
+ 0x00000000, // PA_CL_VPORT_XOFFSET_12
+ 0x00000000, // PA_CL_VPORT_YSCALE_12
+ 0x00000000, // PA_CL_VPORT_YOFFSET_12
+ 0x00000000, // PA_CL_VPORT_ZSCALE_12
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_12
+ 0x00000000, // PA_CL_VPORT_XSCALE_13
+ 0x00000000, // PA_CL_VPORT_XOFFSET_13
+ 0x00000000, // PA_CL_VPORT_YSCALE_13
+ 0x00000000, // PA_CL_VPORT_YOFFSET_13
+ 0x00000000, // PA_CL_VPORT_ZSCALE_13
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_13
+ 0x00000000, // PA_CL_VPORT_XSCALE_14
+ 0x00000000, // PA_CL_VPORT_XOFFSET_14
+ 0x00000000, // PA_CL_VPORT_YSCALE_14
+ 0x00000000, // PA_CL_VPORT_YOFFSET_14
+ 0x00000000, // PA_CL_VPORT_ZSCALE_14
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_14
+ 0x00000000, // PA_CL_VPORT_XSCALE_15
+ 0x00000000, // PA_CL_VPORT_XOFFSET_15
+ 0x00000000, // PA_CL_VPORT_YSCALE_15
+ 0x00000000, // PA_CL_VPORT_YOFFSET_15
+ 0x00000000, // PA_CL_VPORT_ZSCALE_15
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_15
+ 0x00000000, // PA_CL_UCP_0_X
+ 0x00000000, // PA_CL_UCP_0_Y
+ 0x00000000, // PA_CL_UCP_0_Z
+ 0x00000000, // PA_CL_UCP_0_W
+ 0x00000000, // PA_CL_UCP_1_X
+ 0x00000000, // PA_CL_UCP_1_Y
+ 0x00000000, // PA_CL_UCP_1_Z
+ 0x00000000, // PA_CL_UCP_1_W
+ 0x00000000, // PA_CL_UCP_2_X
+ 0x00000000, // PA_CL_UCP_2_Y
+ 0x00000000, // PA_CL_UCP_2_Z
+ 0x00000000, // PA_CL_UCP_2_W
+ 0x00000000, // PA_CL_UCP_3_X
+ 0x00000000, // PA_CL_UCP_3_Y
+ 0x00000000, // PA_CL_UCP_3_Z
+ 0x00000000, // PA_CL_UCP_3_W
+ 0x00000000, // PA_CL_UCP_4_X
+ 0x00000000, // PA_CL_UCP_4_Y
+ 0x00000000, // PA_CL_UCP_4_Z
+ 0x00000000, // PA_CL_UCP_4_W
+ 0x00000000, // PA_CL_UCP_5_X
+ 0x00000000, // PA_CL_UCP_5_Y
+ 0x00000000, // PA_CL_UCP_5_Z
+ 0x00000000, // PA_CL_UCP_5_W
+ 0x00000000, // PA_CL_PROG_NEAR_CLIP_Z
+ 0x00000000, // PA_RATE_CNTL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_CNTL_0
+ 0x00000000, // SPI_PS_INPUT_CNTL_1
+ 0x00000000, // SPI_PS_INPUT_CNTL_2
+ 0x00000000, // SPI_PS_INPUT_CNTL_3
+ 0x00000000, // SPI_PS_INPUT_CNTL_4
+ 0x00000000, // SPI_PS_INPUT_CNTL_5
+ 0x00000000, // SPI_PS_INPUT_CNTL_6
+ 0x00000000, // SPI_PS_INPUT_CNTL_7
+ 0x00000000, // SPI_PS_INPUT_CNTL_8
+ 0x00000000, // SPI_PS_INPUT_CNTL_9
+ 0x00000000, // SPI_PS_INPUT_CNTL_10
+ 0x00000000, // SPI_PS_INPUT_CNTL_11
+ 0x00000000, // SPI_PS_INPUT_CNTL_12
+ 0x00000000, // SPI_PS_INPUT_CNTL_13
+ 0x00000000, // SPI_PS_INPUT_CNTL_14
+ 0x00000000, // SPI_PS_INPUT_CNTL_15
+ 0x00000000, // SPI_PS_INPUT_CNTL_16
+ 0x00000000, // SPI_PS_INPUT_CNTL_17
+ 0x00000000, // SPI_PS_INPUT_CNTL_18
+ 0x00000000, // SPI_PS_INPUT_CNTL_19
+ 0x00000000, // SPI_PS_INPUT_CNTL_20
+ 0x00000000, // SPI_PS_INPUT_CNTL_21
+ 0x00000000, // SPI_PS_INPUT_CNTL_22
+ 0x00000000, // SPI_PS_INPUT_CNTL_23
+ 0x00000000, // SPI_PS_INPUT_CNTL_24
+ 0x00000000, // SPI_PS_INPUT_CNTL_25
+ 0x00000000, // SPI_PS_INPUT_CNTL_26
+ 0x00000000, // SPI_PS_INPUT_CNTL_27
+ 0x00000000, // SPI_PS_INPUT_CNTL_28
+ 0x00000000, // SPI_PS_INPUT_CNTL_29
+ 0x00000000, // SPI_PS_INPUT_CNTL_30
+ 0x00000000, // SPI_PS_INPUT_CNTL_31
+ 0x00000000, // SPI_VS_OUT_CONFIG
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_ENA
+ 0x00000000, // SPI_PS_INPUT_ADDR
+ 0x00000000, // SPI_INTERP_CONTROL_0
+ 0x00000002, // SPI_PS_IN_CONTROL
+ 0x00000000, // SPI_BARYC_SSAA_CNTL
+ 0x00000000, // SPI_BARYC_CNTL
+ 0, // HOLE
+ 0x00000000, // SPI_TMPRING_SIZE
+ 0x00000000, // SPI_GFX_SCRATCH_BASE_LO
+ 0x00000000, // SPI_GFX_SCRATCH_BASE_HI
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_SHADER_IDX_FORMAT
+ 0x00000000, // SPI_SHADER_POS_FORMAT
+ 0x00000000, // SPI_SHADER_Z_FORMAT
+ 0x00000000, // SPI_SHADER_COL_FORMAT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SX_PS_DOWNCONVERT_CONTROL
+ 0x00000000, // SX_PS_DOWNCONVERT
+ 0x00000000, // SX_BLEND_OPT_EPSILON
+ 0x00000000, // SX_BLEND_OPT_CONTROL
+ 0x00000000, // SX_MRT0_BLEND_OPT
+ 0x00000000, // SX_MRT1_BLEND_OPT
+ 0x00000000, // SX_MRT2_BLEND_OPT
+ 0x00000000, // SX_MRT3_BLEND_OPT
+ 0x00000000, // SX_MRT4_BLEND_OPT
+ 0x00000000, // SX_MRT5_BLEND_OPT
+ 0x00000000, // SX_MRT6_BLEND_OPT
+ 0x00000000, // SX_MRT7_BLEND_OPT
+ 0x00000000, // CB_BLEND0_CONTROL
+ 0x00000000, // CB_BLEND1_CONTROL
+ 0x00000000, // CB_BLEND2_CONTROL
+ 0x00000000, // CB_BLEND3_CONTROL
+ 0x00000000, // CB_BLEND4_CONTROL
+ 0x00000000, // CB_BLEND5_CONTROL
+ 0x00000000, // CB_BLEND6_CONTROL
+ 0x00000000, // CB_BLEND7_CONTROL
+};
+static const unsigned int gfx11_SECT_CONTEXT_def_3[] =
+{
+ 0x00000000, // PA_CL_POINT_X_RAD
+ 0x00000000, // PA_CL_POINT_Y_RAD
+ 0x00000000, // PA_CL_POINT_SIZE
+ 0x00000000, // PA_CL_POINT_CULL_RAD
+};
+static const unsigned int gfx11_SECT_CONTEXT_def_4[] =
+{
+ 0x00000000, // GE_MAX_OUTPUT_PER_SUBGROUP
+ 0x00000000, // DB_DEPTH_CONTROL
+ 0x00000000, // DB_EQAA
+ 0x00000000, // CB_COLOR_CONTROL
+ 0x00000000, // DB_SHADER_CONTROL
+ 0x00090000, // PA_CL_CLIP_CNTL
+ 0x00000004, // PA_SU_SC_MODE_CNTL
+ 0x00000000, // PA_CL_VTE_CNTL
+ 0x00000000, // PA_CL_VS_OUT_CNTL
+ 0x00000000, // PA_CL_NANINF_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+ 0x00000000, // PA_SU_PRIM_FILTER_CNTL
+ 0x00000000, // PA_SU_SMALL_PRIM_FILTER_CNTL
+ 0, // HOLE
+ 0x00000000, // PA_CL_NGG_CNTL
+ 0x00000000, // PA_SU_OVER_RASTERIZATION_CNTL
+ 0x00000000, // PA_STEREO_CNTL
+ 0x00000000, // PA_STATE_STEREO_X
+ 0x00000000, // PA_CL_VRS_CNTL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SU_POINT_SIZE
+ 0x00000000, // PA_SU_POINT_MINMAX
+ 0x00000000, // PA_SU_LINE_CNTL
+ 0x00000000, // PA_SC_LINE_STIPPLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+ 0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_GS_ONCHIP_CNTL
+ 0x00000000, // PA_SC_MODE_CNTL_0
+ 0x00000000, // PA_SC_MODE_CNTL_1
+ 0x00000000, // VGT_ENHANCE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // IA_ENHANCE
+};
+static const unsigned int gfx11_SECT_CONTEXT_def_5[] =
+{
+ 0x00000000, // WD_ENHANCE
+ 0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const unsigned int gfx11_SECT_CONTEXT_def_6[] =
+{
+ 0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const unsigned int gfx11_SECT_CONTEXT_def_7[] =
+{
+ 0x00000000, // VGT_DRAW_PAYLOAD_CNTL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_ESGS_RING_ITEMSIZE
+ 0, // HOLE
+ 0x00000000, // VGT_REUSE_OFF
+ 0, // HOLE
+ 0x00000000, // DB_HTILE_SURFACE
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE0
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE1
+ 0x00000000, // DB_PRELOAD_CONTROL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+ 0, // HOLE
+ 0x00000000, // VGT_GS_MAX_VERT_OUT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // GE_NGG_SUBGRP_CNTL
+ 0x00000000, // VGT_TESS_DISTRIBUTION
+ 0x00000000, // VGT_SHADER_STAGES_EN
+ 0x00000000, // VGT_LS_HS_CONFIG
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_TF_PARAM
+ 0x00000000, // DB_ALPHA_TO_MASK
+ 0, // HOLE
+ 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+ 0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+ 0x00000000, // VGT_GS_INSTANCE_CNT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_0
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_1
+ 0x00001000, // PA_SC_LINE_CNTL
+ 0x00000000, // PA_SC_AA_CONFIG
+ 0x00000005, // PA_SU_VTX_CNTL
+ 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+ 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+ 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+ 0x00000000, // PA_SC_SHADER_CONTROL
+ 0x00000003, // PA_SC_BINNER_CNTL_0
+ 0x00000000, // PA_SC_BINNER_CNTL_1
+ 0x00100000, // PA_SC_CONSERVATIVE_RASTERIZATION_CNTL
+ 0x00000000, // PA_SC_NGG_MODE_CNTL
+ 0x00000000, // PA_SC_BINNER_CNTL_2
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR0_BASE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR0_VIEW
+ 0x00000000, // CB_COLOR0_INFO
+ 0x00000000, // CB_COLOR0_ATTRIB
+ 0x00000000, // CB_COLOR0_FDCC_CONTROL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR0_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_BASE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_VIEW
+ 0x00000000, // CB_COLOR1_INFO
+ 0x00000000, // CB_COLOR1_ATTRIB
+ 0x00000000, // CB_COLOR1_FDCC_CONTROL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_BASE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_VIEW
+ 0x00000000, // CB_COLOR2_INFO
+ 0x00000000, // CB_COLOR2_ATTRIB
+ 0x00000000, // CB_COLOR2_FDCC_CONTROL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_BASE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_VIEW
+ 0x00000000, // CB_COLOR3_INFO
+ 0x00000000, // CB_COLOR3_ATTRIB
+ 0x00000000, // CB_COLOR3_FDCC_CONTROL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_BASE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_VIEW
+ 0x00000000, // CB_COLOR4_INFO
+ 0x00000000, // CB_COLOR4_ATTRIB
+ 0x00000000, // CB_COLOR4_FDCC_CONTROL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_BASE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_VIEW
+ 0x00000000, // CB_COLOR5_INFO
+ 0x00000000, // CB_COLOR5_ATTRIB
+ 0x00000000, // CB_COLOR5_FDCC_CONTROL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_BASE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_VIEW
+ 0x00000000, // CB_COLOR6_INFO
+ 0x00000000, // CB_COLOR6_ATTRIB
+ 0x00000000, // CB_COLOR6_FDCC_CONTROL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_BASE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_VIEW
+ 0x00000000, // CB_COLOR7_INFO
+ 0x00000000, // CB_COLOR7_ATTRIB
+ 0x00000000, // CB_COLOR7_FDCC_CONTROL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_DCC_BASE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR0_BASE_EXT
+ 0x00000000, // CB_COLOR1_BASE_EXT
+ 0x00000000, // CB_COLOR2_BASE_EXT
+ 0x00000000, // CB_COLOR3_BASE_EXT
+ 0x00000000, // CB_COLOR4_BASE_EXT
+ 0x00000000, // CB_COLOR5_BASE_EXT
+ 0x00000000, // CB_COLOR6_BASE_EXT
+ 0x00000000, // CB_COLOR7_BASE_EXT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR0_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR1_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR2_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR3_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR4_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR5_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR6_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR7_DCC_BASE_EXT
+ 0x00000000, // CB_COLOR0_ATTRIB2
+ 0x00000000, // CB_COLOR1_ATTRIB2
+ 0x00000000, // CB_COLOR2_ATTRIB2
+ 0x00000000, // CB_COLOR3_ATTRIB2
+ 0x00000000, // CB_COLOR4_ATTRIB2
+ 0x00000000, // CB_COLOR5_ATTRIB2
+ 0x00000000, // CB_COLOR6_ATTRIB2
+ 0x00000000, // CB_COLOR7_ATTRIB2
+ 0x00000000, // CB_COLOR0_ATTRIB3
+ 0x00000000, // CB_COLOR1_ATTRIB3
+ 0x00000000, // CB_COLOR2_ATTRIB3
+ 0x00000000, // CB_COLOR3_ATTRIB3
+ 0x00000000, // CB_COLOR4_ATTRIB3
+ 0x00000000, // CB_COLOR5_ATTRIB3
+ 0x00000000, // CB_COLOR6_ATTRIB3
+ 0x00000000, // CB_COLOR7_ATTRIB3
+};
+static const struct cs_extent_def gfx11_SECT_CONTEXT_defs[] =
+{
+ {gfx11_SECT_CONTEXT_def_1, 0x0000a000, 215 },
+ {gfx11_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
+ {gfx11_SECT_CONTEXT_def_3, 0x0000a1f5, 4 },
+ {gfx11_SECT_CONTEXT_def_4, 0x0000a1ff, 158 },
+ {gfx11_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
+ {gfx11_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+ {gfx11_SECT_CONTEXT_def_7, 0x0000a2a6, 282 },
+ { 0, 0, 0 }
+};
+static const struct cs_section_def gfx11_cs_data[] = {
+ { gfx11_SECT_CONTEXT_defs, SECT_CONTEXT },
+ { 0, SECT_NONE }
+};
+
+#endif /* __CLEARSTATE_GFX11_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx12.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx12.h
new file mode 100644
index 000000000000..2f6c9d11d5ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx12.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CLEARSTATE_GFX12_H_
+#define __CLEARSTATE_GFX12_H_
+
+static const unsigned int gfx12_SECT_CONTEXT_def_1[] = {
+0x00000000, //mmSC_MEM_TEMPORAL
+0x00000000, //mmSC_MEM_SPEC_READ
+0x00000000, //mmPA_SC_VPORT_0_TL
+0x00000000, //mmPA_SC_VPORT_0_BR
+0x00000000, //mmPA_SC_VPORT_1_TL
+0x00000000, //mmPA_SC_VPORT_1_BR
+0x00000000, //mmPA_SC_VPORT_2_TL
+0x00000000, //mmPA_SC_VPORT_2_BR
+0x00000000, //mmPA_SC_VPORT_3_TL
+0x00000000, //mmPA_SC_VPORT_3_BR
+0x00000000, //mmPA_SC_VPORT_4_TL
+0x00000000, //mmPA_SC_VPORT_4_BR
+0x00000000, //mmPA_SC_VPORT_5_TL
+0x00000000, //mmPA_SC_VPORT_5_BR
+0x00000000, //mmPA_SC_VPORT_6_TL
+0x00000000, //mmPA_SC_VPORT_6_BR
+0x00000000, //mmPA_SC_VPORT_7_TL
+0x00000000, //mmPA_SC_VPORT_7_BR
+0x00000000, //mmPA_SC_VPORT_8_TL
+0x00000000, //mmPA_SC_VPORT_8_BR
+0x00000000, //mmPA_SC_VPORT_9_TL
+0x00000000, //mmPA_SC_VPORT_9_BR
+0x00000000, //mmPA_SC_VPORT_10_TL
+0x00000000, //mmPA_SC_VPORT_10_BR
+0x00000000, //mmPA_SC_VPORT_11_TL
+0x00000000, //mmPA_SC_VPORT_11_BR
+0x00000000, //mmPA_SC_VPORT_12_TL
+0x00000000, //mmPA_SC_VPORT_12_BR
+0x00000000, //mmPA_SC_VPORT_13_TL
+0x00000000, //mmPA_SC_VPORT_13_BR
+0x00000000, //mmPA_SC_VPORT_14_TL
+0x00000000, //mmPA_SC_VPORT_14_BR
+0x00000000, //mmPA_SC_VPORT_15_TL
+0x00000000, //mmPA_SC_VPORT_15_BR
+};
+
+static const unsigned int gfx12_SECT_CONTEXT_def_2[] = {
+0x00000000, //mmPA_CL_PROG_NEAR_CLIP_Z
+0x00000000, //mmPA_RATE_CNTL
+};
+
+static const unsigned int gfx12_SECT_CONTEXT_def_3[] = {
+0x00000000, //mmCP_PERFMON_CNTX_CNTL
+};
+
+static const unsigned int gfx12_SECT_CONTEXT_def_4[] = {
+0x00000000, //mmCONTEXT_RESERVED_REG0
+0x00000000, //mmCONTEXT_RESERVED_REG1
+0x00000000, //mmPA_SC_CLIPRECT_0_EXT
+0x00000000, //mmPA_SC_CLIPRECT_1_EXT
+0x00000000, //mmPA_SC_CLIPRECT_2_EXT
+0x00000000, //mmPA_SC_CLIPRECT_3_EXT
+};
+
+static const unsigned int gfx12_SECT_CONTEXT_def_5[] = {
+0x00000000, //mmPA_SC_HIZ_INFO
+0x00000000, //mmPA_SC_HIS_INFO
+0x00000000, //mmPA_SC_HIZ_BASE
+0x00000000, //mmPA_SC_HIZ_BASE_EXT
+0x00000000, //mmPA_SC_HIZ_SIZE_XY
+0x00000000, //mmPA_SC_HIS_BASE
+0x00000000, //mmPA_SC_HIS_BASE_EXT
+0x00000000, //mmPA_SC_HIS_SIZE_XY
+0x00000000, //mmPA_SC_BINNER_OUTPUT_TIMEOUT_CNTL
+0x00000000, //mmPA_SC_BINNER_DYNAMIC_BATCH_LIMIT
+0x00000000, //mmPA_SC_HISZ_CONTROL
+};
+
+static const unsigned int gfx12_SECT_CONTEXT_def_6[] = {
+0x00000000, //mmCB_MEM0_INFO
+0x00000000, //mmCB_MEM1_INFO
+0x00000000, //mmCB_MEM2_INFO
+0x00000000, //mmCB_MEM3_INFO
+0x00000000, //mmCB_MEM4_INFO
+0x00000000, //mmCB_MEM5_INFO
+0x00000000, //mmCB_MEM6_INFO
+0x00000000, //mmCB_MEM7_INFO
+};
+
+static const struct cs_extent_def gfx12_SECT_CONTEXT_defs[] = {
+ {gfx12_SECT_CONTEXT_def_1, 0x0000a03e, 34 },
+ {gfx12_SECT_CONTEXT_def_2, 0x0000a0cc, 2 },
+ {gfx12_SECT_CONTEXT_def_3, 0x0000a0d8, 1 },
+ {gfx12_SECT_CONTEXT_def_4, 0x0000a0db, 6 },
+ {gfx12_SECT_CONTEXT_def_5, 0x0000a2e5, 11 },
+ {gfx12_SECT_CONTEXT_def_6, 0x0000a3c0, 8 },
+ { 0, 0, 0 }
+};
+
+static const struct cs_section_def gfx12_cs_data[] = {
+ { gfx12_SECT_CONTEXT_defs, SECT_CONTEXT },
+ { 0, SECT_NONE }
+};
+
+#endif /* __CLEARSTATE_GFX12_H_ */
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
index 18fd01f3e4b2..9c85ca6358c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_gfx9.h
@@ -1,27 +1,27 @@
-
/*
-***************************************************************************************************
-*
-* Trade secret of Advanced Micro Devices, Inc.
-* Copyright (c) 2010 Advanced Micro Devices, Inc. (unpublished)
-*
-* All rights reserved. This notice is intended as a precaution against inadvertent publication and
-* does not imply publication or any waiver of confidentiality. The year included in the foregoing
-* notice is the year of creation of the work.
-*
-***************************************************************************************************
-*/
-/**
-***************************************************************************************************
-* @brief gfx9 Clearstate Definitions
-***************************************************************************************************
-*
-* Do not edit! This is a machine-generated file!
-*
-*/
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
-static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_1[] = {
0x00000000, // DB_RENDER_CONTROL
0x00000000, // DB_COUNT_CONTROL
0x00000000, // DB_DEPTH_VIEW
@@ -47,7 +47,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
0x00000000, // DB_STENCIL_WRITE_BASE
0x00000000, // DB_STENCIL_WRITE_BASE_HI
0x00000000, // DB_DFSM_CONTROL
- 0x00000000, // DB_RENDER_FILTER
+ 0, // HOLE
0x00000000, // DB_Z_INFO2
0x00000000, // DB_STENCIL_INFO2
0, // HOLE
@@ -235,8 +235,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_1[] =
0x00000000, // PA_SC_VPORT_ZMIN_15
0x3f800000, // PA_SC_VPORT_ZMAX_15
};
-static const unsigned int gfx9_SECT_CONTEXT_def_2[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_2[] = {
0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
0x00000000, // PA_SC_TILE_STEERING_OVERRIDE
0x00000000, // CP_PERFMON_CNTX_CNTL
@@ -258,8 +257,8 @@ static const unsigned int gfx9_SECT_CONTEXT_def_2[] =
0x00000000, // PA_SC_RIGHT_VERT_GRID
0x00000000, // PA_SC_LEFT_VERT_GRID
0x00000000, // PA_SC_HORIZ_GRID
- 0x00000000, // PA_SC_FOV_WINDOW_LR
- 0x00000000, // PA_SC_FOV_WINDOW_TB
+ 0, // HOLE
+ 0, // HOLE
0, // HOLE
0, // HOLE
0, // HOLE
@@ -520,15 +519,13 @@ static const unsigned int gfx9_SECT_CONTEXT_def_2[] =
0x00000000, // CB_MRT6_EPITCH
0x00000000, // CB_MRT7_EPITCH
};
-static const unsigned int gfx9_SECT_CONTEXT_def_3[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_3[] = {
0x00000000, // PA_CL_POINT_X_RAD
0x00000000, // PA_CL_POINT_Y_RAD
0x00000000, // PA_CL_POINT_SIZE
0x00000000, // PA_CL_POINT_CULL_RAD
};
-static const unsigned int gfx9_SECT_CONTEXT_def_4[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_4[] = {
0x00000000, // DB_DEPTH_CONTROL
0x00000000, // DB_EQAA
0x00000000, // CB_COLOR_CONTROL
@@ -687,20 +684,17 @@ static const unsigned int gfx9_SECT_CONTEXT_def_4[] =
0x00000000, // VGT_GS_OUT_PRIM_TYPE
0x00000000, // IA_ENHANCE
};
-static const unsigned int gfx9_SECT_CONTEXT_def_5[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_5[] = {
0x00000000, // WD_ENHANCE
0x00000000, // VGT_PRIMITIVEID_EN
};
-static const unsigned int gfx9_SECT_CONTEXT_def_6[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_6[] = {
0x00000000, // VGT_PRIMITIVEID_RESET
};
-static const unsigned int gfx9_SECT_CONTEXT_def_7[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_7[] = {
0x00000000, // VGT_GS_MAX_PRIMS_PER_SUBGROUP
0x00000000, // VGT_DRAW_PAYLOAD_CNTL
- 0x00000000, // VGT_INDEX_PAYLOAD_CNTL
+ 0, // HOLE
0x00000000, // VGT_INSTANCE_STEP_RATE_0
0x00000000, // VGT_INSTANCE_STEP_RATE_1
0, // HOLE
@@ -765,8 +759,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_7[] =
0x00000000, // VGT_STRMOUT_CONFIG
0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
};
-static const unsigned int gfx9_SECT_CONTEXT_def_8[] =
-{
+static const unsigned int gfx9_SECT_CONTEXT_def_8[] = {
0x00000000, // PA_SC_CENTROID_PRIORITY_0
0x00000000, // PA_SC_CENTROID_PRIORITY_1
0x00001000, // PA_SC_LINE_CNTL
@@ -923,8 +916,7 @@ static const unsigned int gfx9_SECT_CONTEXT_def_8[] =
0x00000000, // CB_COLOR7_DCC_BASE
0x00000000, // CB_COLOR7_DCC_BASE_EXT
};
-static const struct cs_extent_def gfx9_SECT_CONTEXT_defs[] =
-{
+static const struct cs_extent_def gfx9_SECT_CONTEXT_defs[] = {
{gfx9_SECT_CONTEXT_def_1, 0x0000a000, 212 },
{gfx9_SECT_CONTEXT_def_2, 0x0000a0d6, 282 },
{gfx9_SECT_CONTEXT_def_3, 0x0000a1f5, 4 },
diff --git a/drivers/gpu/drm/amd/amdgpu/clearstate_si.h b/drivers/gpu/drm/amd/amdgpu/clearstate_si.h
index 66e39cdb5cb0..5fd96ddd7f0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/clearstate_si.h
+++ b/drivers/gpu/drm/amd/amdgpu/clearstate_si.h
@@ -21,8 +21,7 @@
*
*/
-static const u32 si_SECT_CONTEXT_def_1[] =
-{
+static const u32 si_SECT_CONTEXT_def_1[] = {
0x00000000, // DB_RENDER_CONTROL
0x00000000, // DB_COUNT_CONTROL
0x00000000, // DB_DEPTH_VIEW
@@ -236,8 +235,7 @@ static const u32 si_SECT_CONTEXT_def_1[] =
0x00000000, // PA_SC_VPORT_ZMIN_15
0x3f800000, // PA_SC_VPORT_ZMAX_15
};
-static const u32 si_SECT_CONTEXT_def_2[] =
-{
+static const u32 si_SECT_CONTEXT_def_2[] = {
0x00000000, // CP_PERFMON_CNTX_CNTL
0x00000000, // CP_RINGID
0x00000000, // CP_VMID
@@ -511,8 +509,7 @@ static const u32 si_SECT_CONTEXT_def_2[] =
0x00000000, // CB_BLEND6_CONTROL
0x00000000, // CB_BLEND7_CONTROL
};
-static const u32 si_SECT_CONTEXT_def_3[] =
-{
+static const u32 si_SECT_CONTEXT_def_3[] = {
0x00000000, // PA_CL_POINT_X_RAD
0x00000000, // PA_CL_POINT_Y_RAD
0x00000000, // PA_CL_POINT_SIZE
@@ -520,8 +517,7 @@ static const u32 si_SECT_CONTEXT_def_3[] =
0x00000000, // VGT_DMA_BASE_HI
0x00000000, // VGT_DMA_BASE
};
-static const u32 si_SECT_CONTEXT_def_4[] =
-{
+static const u32 si_SECT_CONTEXT_def_4[] = {
0x00000000, // DB_DEPTH_CONTROL
0x00000000, // DB_EQAA
0x00000000, // CB_COLOR_CONTROL
@@ -680,16 +676,13 @@ static const u32 si_SECT_CONTEXT_def_4[] =
0x00000000, // VGT_GS_OUT_PRIM_TYPE
0x00000000, // IA_ENHANCE
};
-static const u32 si_SECT_CONTEXT_def_5[] =
-{
+static const u32 si_SECT_CONTEXT_def_5[] = {
0x00000000, // VGT_PRIMITIVEID_EN
};
-static const u32 si_SECT_CONTEXT_def_6[] =
-{
+static const u32 si_SECT_CONTEXT_def_6[] = {
0x00000000, // VGT_PRIMITIVEID_RESET
};
-static const u32 si_SECT_CONTEXT_def_7[] =
-{
+static const u32 si_SECT_CONTEXT_def_7[] = {
0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
0, // HOLE
0, // HOLE
@@ -924,8 +917,7 @@ static const u32 si_SECT_CONTEXT_def_7[] =
0x00000000, // CB_COLOR7_CLEAR_WORD0
0x00000000, // CB_COLOR7_CLEAR_WORD1
};
-static const struct cs_extent_def si_SECT_CONTEXT_defs[] =
-{
+static const struct cs_extent_def si_SECT_CONTEXT_defs[] = {
{si_SECT_CONTEXT_def_1, 0x0000a000, 212 },
{si_SECT_CONTEXT_def_2, 0x0000a0d8, 272 },
{si_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
new file mode 100644
index 000000000000..96616a865aac
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "nv.h"
+
+#include "soc15_common.h"
+#include "soc15_hw_ip.h"
+#include "cyan_skillfish_ip_offset.h"
+
+int cyan_skillfish_reg_base_init(struct amdgpu_device *adev)
+{
+	/* HW has more IP blocks, only initialize the blocks needed by the driver */
+ uint32_t i;
+
+ adev->gfx.xcc_mask = 1;
+ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+ adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
+ }
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index a5f294ebff5c..2f891fb846d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -20,7 +20,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+
+#include <linux/pci.h>
+
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "vid.h"
@@ -103,15 +105,15 @@ static void cz_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int cz_ih_irq_init(struct amdgpu_device *adev)
{
- int rb_bufsz;
+ struct amdgpu_ih_ring *ih = &adev->irq.ih;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
- u64 wptr_off;
+ int rb_bufsz;
/* disable irqs */
cz_ih_disable_interrupts(adev);
/* setup interrupt control */
- WREG32(mmINTERRUPT_CNTL2, adev->dummy_page.addr >> 8);
+ WREG32(mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(mmINTERRUPT_CNTL);
/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
@@ -133,9 +135,8 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
/* set the writeback address whether it's enabled or not */
- wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
- WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(wptr_off));
- WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(wptr_off) & 0xFF);
+ WREG32(mmIH_RB_WPTR_ADDR_LO, lower_32_bits(ih->wptr_addr));
+ WREG32(mmIH_RB_WPTR_ADDR_HI, upper_32_bits(ih->wptr_addr) & 0xFF);
WREG32(mmIH_RB_CNTL, ih_rb_cntl);
@@ -178,6 +179,7 @@ static void cz_ih_irq_disable(struct amdgpu_device *adev)
* cz_ih_get_wptr - get the IH ring buffer wptr
*
* @adev: amdgpu_device pointer
+ * @ih: IH ring buffer to fetch the wptr from
*
* Get the IH ring buffer wptr from either the register
* or the writeback memory buffer (VI). Also check for
@@ -185,74 +187,96 @@ static void cz_ih_irq_disable(struct amdgpu_device *adev)
* Used by cz_irq_process(VI).
* Returns the value of the wptr.
*/
-static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
+static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
u32 wptr, tmp;
- wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);
-
- if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
- wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
- /* When a ring buffer overflow happen start parsing interrupt
- * from the last not overwritten vector (wptr + 16). Hopefully
- * this should allow us to catchup.
- */
- dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
- wptr, adev->irq.ih.rptr, (wptr + 16) & adev->irq.ih.ptr_mask);
- adev->irq.ih.rptr = (wptr + 16) & adev->irq.ih.ptr_mask;
- tmp = RREG32(mmIH_RB_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(mmIH_RB_CNTL, tmp);
- }
- return (wptr & adev->irq.ih.ptr_mask);
+ wptr = le32_to_cpu(*ih->wptr_cpu);
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ /* Double check that the overflow wasn't already cleared. */
+ wptr = RREG32(mmIH_RB_WPTR);
+
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+
+ wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);
+
+	/* When a ring buffer overflow happens, start parsing interrupts
+	 * from the last vector that was not overwritten (wptr + 16).
+	 * Hopefully this should allow us to catch up.
+ */
+ dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+ wptr, ih->rptr, (wptr + 16) & ih->ptr_mask);
+ ih->rptr = (wptr + 16) & ih->ptr_mask;
+ tmp = RREG32(mmIH_RB_CNTL);
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
+ WREG32(mmIH_RB_CNTL, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32(mmIH_RB_CNTL, tmp);
+
+out:
+ return (wptr & ih->ptr_mask);
}
/**
* cz_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
+ * @ih: IH ring buffer to decode from
+ * @entry: IV entry to place decoded information into
*
* Decodes the interrupt vector at the current rptr
* position and also advance the position.
*/
static void cz_ih_decode_iv(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry)
+ struct amdgpu_ih_ring *ih,
+ struct amdgpu_iv_entry *entry)
{
/* wptr/rptr are in bytes! */
- u32 ring_index = adev->irq.ih.rptr >> 2;
+ u32 ring_index = ih->rptr >> 2;
uint32_t dw[4];
- dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
- dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
- dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
- dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+ dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
+ dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
+ dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
+ dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
- entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
+ entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
entry->src_id = dw[0] & 0xff;
entry->src_data[0] = dw[1] & 0xfffffff;
entry->ring_id = dw[2] & 0xff;
- entry->vm_id = (dw[2] >> 8) & 0xff;
- entry->pas_id = (dw[2] >> 16) & 0xffff;
+ entry->vmid = (dw[2] >> 8) & 0xff;
+ entry->pasid = (dw[2] >> 16) & 0xffff;
/* wptr/rptr are in bytes! */
- adev->irq.ih.rptr += 16;
+ ih->rptr += 16;
}
/**
* cz_ih_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
+ * @ih: IH ring buffer to set the rptr for
*
* Set the IH ring buffer rptr.
*/
-static void cz_ih_set_rptr(struct amdgpu_device *adev)
+static void cz_ih_set_rptr(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
{
- WREG32(mmIH_RB_RPTR, adev->irq.ih.rptr);
+ WREG32(mmIH_RB_RPTR, ih->rptr);
}
-static int cz_ih_early_init(void *handle)
+static int cz_ih_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
ret = amdgpu_irq_add_domain(adev);
@@ -264,12 +288,12 @@ static int cz_ih_early_init(void *handle)
return 0;
}
-static int cz_ih_sw_init(void *handle)
+static int cz_ih_sw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- r = amdgpu_ih_ring_init(adev, 64 * 1024, false);
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false);
if (r)
return r;
@@ -278,21 +302,20 @@ static int cz_ih_sw_init(void *handle)
return r;
}
-static int cz_ih_sw_fini(void *handle)
+static int cz_ih_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- amdgpu_irq_fini(adev);
- amdgpu_ih_ring_fini(adev);
+ amdgpu_irq_fini_sw(adev);
amdgpu_irq_remove_domain(adev);
return 0;
}
-static int cz_ih_hw_init(void *handle)
+static int cz_ih_hw_init(struct amdgpu_ip_block *ip_block)
{
int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
r = cz_ih_irq_init(adev);
if (r)
@@ -301,32 +324,26 @@ static int cz_ih_hw_init(void *handle)
return 0;
}
-static int cz_ih_hw_fini(void *handle)
+static int cz_ih_hw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- cz_ih_irq_disable(adev);
+ cz_ih_irq_disable(ip_block->adev);
return 0;
}
-static int cz_ih_suspend(void *handle)
+static int cz_ih_suspend(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cz_ih_hw_fini(adev);
+ return cz_ih_hw_fini(ip_block);
}
-static int cz_ih_resume(void *handle)
+static int cz_ih_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- return cz_ih_hw_init(adev);
+ return cz_ih_hw_init(ip_block);
}
-static bool cz_ih_is_idle(void *handle)
+static bool cz_ih_is_idle(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (REG_GET_FIELD(tmp, SRBM_STATUS, IH_BUSY))
@@ -335,11 +352,11 @@ static bool cz_ih_is_idle(void *handle)
return true;
}
-static int cz_ih_wait_for_idle(void *handle)
+static int cz_ih_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
unsigned i;
u32 tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->usec_timeout; i++) {
/* read MC_STATUS */
@@ -351,10 +368,10 @@ static int cz_ih_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int cz_ih_soft_reset(void *handle)
+static int cz_ih_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
u32 tmp = RREG32(mmSRBM_STATUS);
if (tmp & SRBM_STATUS__IH_BUSY_MASK)
@@ -381,14 +398,14 @@ static int cz_ih_soft_reset(void *handle)
return 0;
}
-static int cz_ih_set_clockgating_state(void *handle,
+static int cz_ih_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
// TODO
return 0;
}
-static int cz_ih_set_powergating_state(void *handle,
+static int cz_ih_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
// TODO
@@ -398,7 +415,6 @@ static int cz_ih_set_powergating_state(void *handle,
static const struct amd_ip_funcs cz_ih_ip_funcs = {
.name = "cz_ih",
.early_init = cz_ih_early_init,
- .late_init = NULL,
.sw_init = cz_ih_sw_init,
.sw_fini = cz_ih_sw_fini,
.hw_init = cz_ih_hw_init,
@@ -420,8 +436,7 @@ static const struct amdgpu_ih_funcs cz_ih_funcs = {
static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev)
{
- if (adev->irq.ih_funcs == NULL)
- adev->irq.ih_funcs = &cz_ih_funcs;
+ adev->irq.ih_funcs = &cz_ih_funcs;
}
const struct amdgpu_ip_block_version cz_ih_ip_block =
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 0cdeb6a2e4a0..72ca6538b2e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -20,7 +20,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+
+#include <drm/drm_edid.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_vblank.h>
+
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
@@ -31,6 +37,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "amdgpu_display.h"
#include "dce_v10_0.h"
#include "dce/dce_10_0_d.h"
@@ -41,11 +48,13 @@
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+
static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev);
+static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);
-static const u32 crtc_offsets[] =
-{
+static const u32 crtc_offsets[] = {
CRTC0_REGISTER_OFFSET,
CRTC1_REGISTER_OFFSET,
CRTC2_REGISTER_OFFSET,
@@ -55,8 +64,7 @@ static const u32 crtc_offsets[] =
CRTC6_REGISTER_OFFSET
};
-static const u32 hpd_offsets[] =
-{
+static const u32 hpd_offsets[] = {
HPD0_REGISTER_OFFSET,
HPD1_REGISTER_OFFSET,
HPD2_REGISTER_OFFSET,
@@ -113,30 +121,26 @@ static const struct {
.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
-static const u32 golden_settings_tonga_a11[] =
-{
+static const u32 golden_settings_tonga_a11[] = {
mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
mmFBC_MISC, 0x1f311fff, 0x12300000,
mmHDMI_CONTROL, 0x31000111, 0x00000011,
};
-static const u32 tonga_mgcg_cgcg_init[] =
-{
+static const u32 tonga_mgcg_cgcg_init[] = {
mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};
-static const u32 golden_settings_fiji_a10[] =
-{
+static const u32 golden_settings_fiji_a10[] = {
mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
mmFBC_MISC, 0x1f311fff, 0x12300000,
mmHDMI_CONTROL, 0x31000111, 0x00000011,
};
-static const u32 fiji_mgcg_cgcg_init[] =
-{
+static const u32 fiji_mgcg_cgcg_init[] = {
mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};
@@ -145,20 +149,20 @@ static void dce_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_FIJI:
- amdgpu_program_register_sequence(adev,
- fiji_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_fiji_a10,
- (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
+ amdgpu_device_program_register_sequence(adev,
+ fiji_mgcg_cgcg_init,
+ ARRAY_SIZE(fiji_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_fiji_a10,
+ ARRAY_SIZE(golden_settings_fiji_a10));
break;
case CHIP_TONGA:
- amdgpu_program_register_sequence(adev,
- tonga_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- golden_settings_tonga_a11,
- (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
+ amdgpu_device_program_register_sequence(adev,
+ tonga_mgcg_cgcg_init,
+ ARRAY_SIZE(tonga_mgcg_cgcg_init));
+ amdgpu_device_program_register_sequence(adev,
+ golden_settings_tonga_a11,
+ ARRAY_SIZE(golden_settings_tonga_a11));
break;
default:
break;
@@ -190,66 +194,6 @@ static void dce_v10_0_audio_endpt_wreg(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}
-static bool dce_v10_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
-{
- if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
- CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
- return true;
- else
- return false;
-}
-
-static bool dce_v10_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
-{
- u32 pos1, pos2;
-
- pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
- pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
- if (pos1 != pos2)
- return true;
- else
- return false;
-}
-
-/**
- * dce_v10_0_vblank_wait - vblank wait asic callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc: crtc to wait for vblank on
- *
- * Wait for vblank on the requested crtc (evergreen+).
- */
-static void dce_v10_0_vblank_wait(struct amdgpu_device *adev, int crtc)
-{
- unsigned i = 100;
-
- if (crtc >= adev->mode_info.num_crtc)
- return;
-
- if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
- return;
-
- /* depending on when we hit vblank, we may be close to active; if so,
- * wait for another frame.
- */
- while (dce_v10_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v10_0_is_counter_moving(adev, crtc))
- break;
- }
- }
-
- while (!dce_v10_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v10_0_is_counter_moving(adev, crtc))
- break;
- }
- }
-}
-
static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
if (crtc >= adev->mode_info.num_crtc)
@@ -282,6 +226,7 @@ static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @crtc_id: crtc to cleanup pageflip on
* @crtc_base: new address of the crtc (GPU MC address)
+ * @async: asynchronous flip
*
* Triggers the actual pageflip by updating the primary
* surface base address.
@@ -290,6 +235,7 @@ static void dce_v10_0_page_flip(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base, bool async)
{
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+ struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
u32 tmp;
/* flip at hsync for async, default is vsync */
@@ -297,6 +243,9 @@ static void dce_v10_0_page_flip(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0);
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ /* update pitch */
+ WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
+ fb->pitches[0] / fb->format->cpp[0]);
/* update the primary scanout address */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
@@ -378,11 +327,13 @@ static void dce_v10_0_hpd_set_polarity(struct amdgpu_device *adev,
*/
static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -414,10 +365,12 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
+ dce_v10_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
dce_v10_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -430,11 +383,13 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev)
*/
static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -447,6 +402,7 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->hpd_irq,
amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -484,134 +440,6 @@ static bool dce_v10_0_is_display_hung(struct amdgpu_device *adev)
return true;
}
-static void dce_v10_0_stop_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 crtc_enabled, tmp;
- int i;
-
- save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
- /* disable VGA render */
- tmp = RREG32(mmVGA_RENDER_CONTROL);
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32(mmVGA_RENDER_CONTROL, tmp);
-
- /* blank the display controllers */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
- CRTC_CONTROL, CRTC_MASTER_EN);
- if (crtc_enabled) {
-#if 0
- u32 frame_count;
- int j;
-
- save->crtc_enabled[i] = true;
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
- amdgpu_display_vblank_wait(adev, i);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
- }
-#else
- /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- save->crtc_enabled[i] = false;
- /* ***** */
-#endif
- } else {
- save->crtc_enabled[i] = false;
- }
- }
-}
-
-static void dce_v10_0_resume_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 tmp, frame_count;
- int i, j;
-
- /* update crtc base addresses */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
-
- if (save->crtc_enabled[i]) {
- tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 0) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 0);
- WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
- tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
- }
- for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
- break;
- udelay(1);
- }
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- /* wait for the next frame */
- frame_count = amdgpu_display_vblank_get_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
- }
- }
-
- WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
- WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
-
- /* Unlock vga access */
- WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
- mdelay(1);
- WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-}
-
static void dce_v10_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
@@ -676,7 +504,7 @@ void dce_v10_0_disable_dce(struct amdgpu_device *adev)
static void dce_v10_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1207,8 +1035,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min_t(u32, line_time, 65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -1238,7 +1069,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
wm_high.num_heads = num_heads;
/* set for high clocks */
- latency_watermark_a = min(dce_v10_0_latency_watermark(&wm_high), (u32)65535);
+ latency_watermark_a = min_t(u32, dce_v10_0_latency_watermark(&wm_high), 65535);
/* possibly force display priority to high */
/* should really do this at mode validation time... */
@@ -1277,7 +1108,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
wm_low.num_heads = num_heads;
/* set for low clocks */
- latency_watermark_b = min(dce_v10_0_latency_watermark(&wm_low), (u32)65535);
+ latency_watermark_b = min_t(u32, dce_v10_0_latency_watermark(&wm_low), 65535);
/* possibly force display priority to high */
/* should really do this at mode validation time... */
@@ -1310,8 +1141,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
+
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
@@ -1330,7 +1160,7 @@ static void dce_v10_0_bandwidth_update(struct amdgpu_device *adev)
u32 num_heads = 0, lb_size;
int i;
- amdgpu_update_display_priority(adev);
+ amdgpu_display_update_priority(adev);
for (i = 0; i < adev->mode_info.num_crtc; i++) {
if (adev->mode_info.crtcs[i]->base.enabled)
@@ -1378,7 +1208,7 @@ static struct amdgpu_audio_pin *dce_v10_0_audio_get_pin(struct amdgpu_device *ad
static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 tmp;
@@ -1394,10 +1224,12 @@ static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
int interlace = 0;
@@ -1405,12 +1237,14 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1436,10 +1270,12 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp;
u8 *sadb = NULL;
@@ -1448,19 +1284,21 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
return;
}
- sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
+ sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
if (sad_count < 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
sad_count = 0;
@@ -1488,10 +1326,12 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1514,23 +1354,25 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
return;
}
- sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
@@ -1579,8 +1421,7 @@ static void dce_v10_0_audio_enable(struct amdgpu_device *adev,
enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}
-static const u32 pin_offsets[] =
-{
+static const u32 pin_offsets[] = {
AUD0_REGISTER_OFFSET,
AUD1_REGISTER_OFFSET,
AUD2_REGISTER_OFFSET,
@@ -1620,17 +1461,12 @@ static int dce_v10_0_audio_init(struct amdgpu_device *adev)
static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
@@ -1640,7 +1476,7 @@ static void dce_v10_0_audio_fini(struct amdgpu_device *adev)
static void dce_v10_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1676,7 +1512,7 @@ static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
uint8_t *frame = buffer + 3;
@@ -1695,7 +1531,7 @@ static void dce_v10_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
static void dce_v10_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1726,7 +1562,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1864,7 +1700,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
dce_v10_0_audio_write_sad_regs(encoder);
dce_v10_0_audio_write_latency_fields(encoder, mode);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
@@ -1906,7 +1742,7 @@ static void dce_v10_0_afmt_setmode(struct drm_encoder *encoder,
static void dce_v10_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1965,8 +1801,7 @@ static void dce_v10_0_afmt_fini(struct amdgpu_device *adev)
}
}
-static const u32 vga_control_regs[6] =
-{
+static const u32 vga_control_regs[6] = {
mmD1VGA_CONTROL,
mmD2VGA_CONTROL,
mmD3VGA_CONTROL,
@@ -1979,7 +1814,7 @@ static void dce_v10_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 vga_control;
vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1993,7 +1828,7 @@ static void dce_v10_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (enable)
WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
@@ -2007,8 +1842,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_framebuffer *amdgpu_fb;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@@ -2019,7 +1853,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2027,32 +1860,29 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
return 0;
}
- if (atomic) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
+ if (atomic)
target_fb = fb;
- } else {
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ else
target_fb = crtc->primary->fb;
- }
/* If atomic, assume fb object is pinned & idle & fenced and
* just update base pointers
*/
- obj = amdgpu_fb->obj;
+ obj = target_fb->obj[0];
abo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (!atomic) {
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
+ fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
@@ -2130,9 +1960,20 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
+ fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
+ fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
+ fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
+#ifdef __BIG_ENDIAN
+ fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
+ ENDIAN_8IN32);
+#endif
+ break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
@@ -2228,8 +2069,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
@@ -2247,7 +2087,7 @@ static void dce_v10_0_set_interleave(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
u32 tmp;
@@ -2263,7 +2103,8 @@ static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ u16 *r, *g, *b;
int i;
u32 tmp;
@@ -2301,11 +2142,14 @@ static void dce_v10_0_crtc_load_lut(struct drm_crtc *crtc)
WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->lut_r[i] << 20) |
- (amdgpu_crtc->lut_g[i] << 10) |
- (amdgpu_crtc->lut_b[i] << 0));
+ ((*r++ & 0xffc0) << 14) |
+ ((*g++ & 0xffc0) << 4) |
+ (*b++ >> 6));
}
tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
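The gamma loop above now reads the 16-bit per-channel values straight from crtc->gamma_store and packs the top 10 bits of each channel into one DC_LUT_30_COLOR word. A minimal illustration of that packing, assuming kernel u16/u32 types; the helper name is invented for the example:

/* Keep the upper 10 bits of each 16-bit gamma entry and place them at
 * bits 29:20 (red), 19:10 (green) and 9:0 (blue), as in the loop above.
 */
static u32 example_pack_lut_entry(u16 r, u16 g, u16 b)
{
	return ((u32)(r & 0xffc0) << 14) |
	       ((u32)(g & 0xffc0) << 4) |
	       ((u32)b >> 6);
}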
@@ -2350,22 +2194,18 @@ static int dce_v10_0_pick_dig_encoder(struct drm_encoder *encoder)
return 1;
else
return 0;
- break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
return 3;
else
return 2;
- break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
return 5;
else
return 4;
- break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
return 6;
- break;
default:
DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
return 0;
@@ -2398,7 +2238,7 @@ static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 pll_in_use;
int pll;
@@ -2433,7 +2273,7 @@ static u32 dce_v10_0_pick_pll(struct drm_crtc *crtc)
static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint32_t cur_lock;
@@ -2448,18 +2288,18 @@ static void dce_v10_0_lock_cursor(struct drm_crtc *crtc, bool lock)
static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
u32 tmp;
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
u32 tmp;
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2467,17 +2307,17 @@ static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
int x, int y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
int xorigin = 0, yorigin = 0;
amdgpu_crtc->cursor_x = x;
@@ -2552,17 +2392,19 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
dce_v10_0_lock_cursor(crtc, true);
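With the newer amdgpu_bo_pin() signature the GPU address is no longer returned through an out parameter; the buffer is flagged for a contiguous VRAM placement, pinned, and the offset is then queried with amdgpu_bo_gpu_offset(). A reduced sketch of that sequence (hypothetical helper, error handling trimmed to plain returns):

/* Sketch mirroring the cursor path above: reserve, flag for contiguous
 * VRAM, pin, drop the reservation, then read the pinned GPU offset.
 */
static int example_pin_scanout_bo(struct amdgpu_bo *aobj, u64 *gpu_addr)
{
	int ret;

	ret = amdgpu_bo_reserve(aobj, false);
	if (ret)
		return ret;

	aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
	amdgpu_bo_unreserve(aobj);
	if (ret)
		return ret;

	*gpu_addr = amdgpu_bo_gpu_offset(aobj);
	return 0;
}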
@@ -2594,7 +2436,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
@@ -2621,15 +2463,6 @@ static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- amdgpu_crtc->lut_r[i] = red[i] >> 6;
- amdgpu_crtc->lut_g[i] = green[i] >> 6;
- amdgpu_crtc->lut_b[i] = blue[i] >> 6;
- }
dce_v10_0_crtc_load_lut(crtc);
return 0;
@@ -2647,15 +2480,19 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
.cursor_set2 = dce_v10_0_crtc_cursor_set2,
.cursor_move = dce_v10_0_crtc_cursor_move,
.gamma_set = dce_v10_0_crtc_gamma_set,
- .set_config = amdgpu_crtc_set_config,
+ .set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v10_0_crtc_destroy,
- .page_flip_target = amdgpu_crtc_page_flip_target,
+ .page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;
@@ -2667,7 +2504,8 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
dce_v10_0_vga_enable(crtc, false);
/* Make sure VBLANK and PFLIP interrupts are still enabled */
- type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ type = amdgpu_display_crtc_idx_to_irq_type(adev,
+ amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
drm_crtc_vblank_on(crtc);
@@ -2687,7 +2525,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
break;
}
/* adjust pm to dpms */
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_dpm_compute_clocks(adev);
}
static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc)
@@ -2708,18 +2546,16 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_atom_ss ss;
int i;
dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
- struct amdgpu_framebuffer *amdgpu_fb;
struct amdgpu_bo *abo;
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
@@ -2806,7 +2642,7 @@ static bool dce_v10_0_crtc_mode_fixup(struct drm_crtc *crtc,
amdgpu_crtc->connector = NULL;
return false;
}
- if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+ if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
return false;
@@ -2830,7 +2666,7 @@ static int dce_v10_0_crtc_set_base_atomic(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
- return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1);
+ return dce_v10_0_crtc_do_set_base(crtc, fb, x, y, 1);
}
static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
@@ -2841,21 +2677,46 @@ static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
.mode_set_base_atomic = dce_v10_0_crtc_set_base_atomic,
.prepare = dce_v10_0_crtc_prepare,
.commit = dce_v10_0_crtc_commit,
- .load_lut = dce_v10_0_crtc_load_lut,
.disable = dce_v10_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
+};
+
+static void dce_v10_0_panic_flush(struct drm_plane *plane)
+{
+ struct drm_framebuffer *fb;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_device *adev;
+ uint32_t fb_format;
+
+ if (!plane->fb)
+ return;
+
+ fb = plane->fb;
+ amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
+ adev = drm_to_adev(fb->dev);
+
+ /* Disable DC tiling */
+ fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+}
+
+static const struct drm_plane_helper_funcs dce_v10_0_drm_primary_plane_helper_funcs = {
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = dce_v10_0_panic_flush,
};
static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
- int i;
amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
(AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
if (amdgpu_crtc == NULL)
return -ENOMEM;
- drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
+ drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v10_0_crtc_funcs);
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
@@ -2863,14 +2724,8 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->max_cursor_width = 128;
amdgpu_crtc->max_cursor_height = 128;
- adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
-
- for (i = 0; i < 256; i++) {
- amdgpu_crtc->lut_r[i] = i << 2;
- amdgpu_crtc->lut_g[i] = i << 2;
- amdgpu_crtc->lut_b[i] = i << 2;
- }
+ adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+ adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
switch (amdgpu_crtc->crtc_id) {
case 0:
@@ -2899,19 +2754,19 @@ static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v10_0_crtc_helper_funcs);
+ drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v10_0_drm_primary_plane_helper_funcs);
return 0;
}
-static int dce_v10_0_early_init(void *handle)
+static int dce_v10_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->audio_endpt_rreg = &dce_v10_0_audio_endpt_rreg;
adev->audio_endpt_wreg = &dce_v10_0_audio_endpt_wreg;
dce_v10_0_set_display_funcs(adev);
- dce_v10_0_set_irq_funcs(adev);
adev->mode_info.num_crtc = dce_v10_0_get_num_crtc(adev);
@@ -2926,49 +2781,51 @@ static int dce_v10_0_early_init(void *handle)
return -EINVAL;
}
+ dce_v10_0_set_irq_funcs(adev);
+
return 0;
}
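This and the following hunks convert the amd_ip_funcs callbacks to take a struct amdgpu_ip_block * rather than a void handle, with the device looked up through the block. A bare-bones sketch of the new callback shape; the name and body are placeholders, only the signature is taken from the hunks:

/* Minimal example of the updated callback signature; a real handler does
 * its per-IP-block setup where the comment sits.
 */
static int example_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* per-IP-block early setup would go here */
	(void)adev;
	return 0;
}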
-static int dce_v10_0_sw_init(void *handle)
+static int dce_v10_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
if (r)
return r;
}
- for (i = 8; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r)
return r;
- adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
+ adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
- adev->ddev->mode_config.async_page_flip = true;
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
- adev->ddev->mode_config.preferred_depth = 24;
- adev->ddev->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+ adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
- r = amdgpu_modeset_create_props(adev);
+ r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
/* allocate crtcs */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2978,7 +2835,7 @@ static int dce_v10_0_sw_init(void *handle)
}
if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_print_display_setup(adev->ddev);
+ amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
@@ -2991,37 +2848,50 @@ static int dce_v10_0_sw_init(void *handle)
if (r)
return r;
- drm_kms_helper_poll_init(adev->ddev);
+ /* Disable vblank IRQs aggressively for power-saving */
+ /* XXX: can this be enabled for DC? */
+ adev_to_drm(adev)->vblank_disable_immediate = true;
+
+ r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
+ if (r)
+ return r;
+
+ INIT_DELAYED_WORK(&adev->hotplug_work,
+ amdgpu_display_hotplug_work_func);
+
+ drm_kms_helper_poll_init(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = true;
return 0;
}
-static int dce_v10_0_sw_fini(void *handle)
+static int dce_v10_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
- drm_kms_helper_poll_fini(adev->ddev);
+ drm_kms_helper_poll_fini(adev_to_drm(adev));
dce_v10_0_audio_fini(adev);
dce_v10_0_afmt_fini(adev);
- drm_mode_config_cleanup(adev->ddev);
+ drm_mode_config_cleanup(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = false;
return 0;
}
-static int dce_v10_0_hw_init(void *handle)
+static int dce_v10_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v10_0_init_golden_registers(adev);
+ /* disable vga render */
+ dce_v10_0_set_vga_render_state(adev, false);
/* init dig PHYs, disp eng pll */
amdgpu_atombios_encoder_init_dig(adev);
amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
@@ -3038,10 +2908,10 @@ static int dce_v10_0_hw_init(void *handle)
return 0;
}
-static int dce_v10_0_hw_fini(void *handle)
+static int dce_v10_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v10_0_hpd_fini(adev);
@@ -3051,20 +2921,35 @@ static int dce_v10_0_hw_fini(void *handle)
dce_v10_0_pageflip_interrupt_fini(adev);
+ flush_delayed_work(&adev->hotplug_work);
+
return 0;
}
-static int dce_v10_0_suspend(void *handle)
+static int dce_v10_0_suspend(struct amdgpu_ip_block *ip_block)
{
- return dce_v10_0_hw_fini(handle);
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_display_suspend_helper(adev);
+ if (r)
+ return r;
+
+ adev->mode_info.bl_level =
+ amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
+ return dce_v10_0_hw_fini(ip_block);
}
-static int dce_v10_0_resume(void *handle)
+static int dce_v10_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
- ret = dce_v10_0_hw_init(handle);
+ amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+ adev->mode_info.bl_level);
+
+ ret = dce_v10_0_hw_init(ip_block);
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
@@ -3073,31 +2958,28 @@ static int dce_v10_0_resume(void *handle)
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
bl_level);
}
+ if (ret)
+ return ret;
- return ret;
+ return amdgpu_display_resume_helper(adev);
}
-static bool dce_v10_0_is_idle(void *handle)
+static bool dce_v10_0_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
-static int dce_v10_0_wait_for_idle(void *handle)
+static bool dce_v10_0_check_soft_reset(struct amdgpu_ip_block *ip_block)
{
- return 0;
-}
-
-static bool dce_v10_0_check_soft_reset(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
return dce_v10_0_is_display_hung(adev);
}
-static int dce_v10_0_soft_reset(void *handle)
+static int dce_v10_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
u32 srbm_soft_reset = 0, tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
if (dce_v10_0_is_display_hung(adev))
srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
@@ -3187,7 +3069,7 @@ static int dce_v10_0_set_hpd_irq_state(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
@@ -3306,14 +3188,14 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
if (amdgpu_crtc == NULL)
return 0;
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return 0;
}
@@ -3325,7 +3207,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
if (works->event)
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
@@ -3339,7 +3221,7 @@ static void dce_v10_0_hpd_int_ack(struct amdgpu_device *adev,
u32 tmp;
if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return;
}
@@ -3384,7 +3266,7 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
{
unsigned crtc = entry->src_id - 1;
uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
- unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+ unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, crtc);
switch (entry->src_data[0]) {
case 0: /* vblank */
@@ -3394,7 +3276,7 @@ static int dce_v10_0_crtc_irq(struct amdgpu_device *adev,
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
+ drm_handle_vblank(adev_to_drm(adev), crtc);
}
DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
@@ -3434,20 +3316,20 @@ static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
if (disp_int & mask) {
dce_v10_0_hpd_int_ack(adev, hpd);
- schedule_work(&adev->hotplug_work);
+ schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
return 0;
}
-static int dce_v10_0_set_clockgating_state(void *handle,
+static int dce_v10_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dce_v10_0_set_powergating_state(void *handle,
+static int dce_v10_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -3456,7 +3338,6 @@ static int dce_v10_0_set_powergating_state(void *handle,
static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.name = "dce_v10_0",
.early_init = dce_v10_0_early_init,
- .late_init = NULL,
.sw_init = dce_v10_0_sw_init,
.sw_fini = dce_v10_0_sw_fini,
.hw_init = dce_v10_0_hw_init,
@@ -3464,7 +3345,6 @@ static const struct amd_ip_funcs dce_v10_0_ip_funcs = {
.suspend = dce_v10_0_suspend,
.resume = dce_v10_0_resume,
.is_idle = dce_v10_0_is_idle,
- .wait_for_idle = dce_v10_0_wait_for_idle,
.check_soft_reset = dce_v10_0_check_soft_reset,
.soft_reset = dce_v10_0_soft_reset,
.set_clockgating_state = dce_v10_0_set_clockgating_state,
@@ -3494,7 +3374,7 @@ dce_v10_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -3534,7 +3414,7 @@ static void dce_v10_0_encoder_prepare(struct drm_encoder *encoder)
static void dce_v10_0_encoder_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* need to call this here as we need the crtc set up */
amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -3634,7 +3514,7 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
uint32_t supported_device,
u16 caps)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
@@ -3734,10 +3614,8 @@ static void dce_v10_0_encoder_add(struct amdgpu_device *adev,
}
static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
- .set_vga_render_state = &dce_v10_0_set_vga_render_state,
.bandwidth_update = &dce_v10_0_bandwidth_update,
.vblank_get_counter = &dce_v10_0_vblank_get_counter,
- .vblank_wait = &dce_v10_0_vblank_wait,
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
.hpd_sense = &dce_v10_0_hpd_sense,
@@ -3747,14 +3625,11 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = {
.page_flip_get_scanoutpos = &dce_v10_0_crtc_get_scanoutpos,
.add_encoder = &dce_v10_0_encoder_add,
.add_connector = &amdgpu_connector_add,
- .stop_mc_access = &dce_v10_0_stop_mc_access,
- .resume_mc_access = &dce_v10_0_resume_mc_access,
};
static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev)
{
- if (adev->mode_info.funcs == NULL)
- adev->mode_info.funcs = &dce_v10_0_display_funcs;
+ adev->mode_info.funcs = &dce_v10_0_display_funcs;
}
static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = {
@@ -3774,18 +3649,20 @@ static const struct amdgpu_irq_src_funcs dce_v10_0_hpd_irq_funcs = {
static void dce_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ if (adev->mode_info.num_crtc > 0)
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
+ else
+ adev->crtc_irq.num_types = 0;
adev->crtc_irq.funcs = &dce_v10_0_crtc_irq_funcs;
- adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
adev->pageflip_irq.funcs = &dce_v10_0_pageflip_irq_funcs;
- adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+ adev->hpd_irq.num_types = adev->mode_info.num_hpd;
adev->hpd_irq.funcs = &dce_v10_0_hpd_irq_funcs;
}
-const struct amdgpu_ip_block_version dce_v10_0_ip_block =
-{
+const struct amdgpu_ip_block_version dce_v10_0_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 10,
.minor = 0,
@@ -3793,8 +3670,7 @@ const struct amdgpu_ip_block_version dce_v10_0_ip_block =
.funcs = &dce_v10_0_ip_funcs,
};
-const struct amdgpu_ip_block_version dce_v10_1_ip_block =
-{
+const struct amdgpu_ip_block_version dce_v10_1_ip_block = {
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 10,
.minor = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
deleted file mode 100644
index 773654a19749..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ /dev/null
@@ -1,3872 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "drmP.h"
-#include "amdgpu.h"
-#include "amdgpu_pm.h"
-#include "amdgpu_i2c.h"
-#include "vid.h"
-#include "atom.h"
-#include "amdgpu_atombios.h"
-#include "atombios_crtc.h"
-#include "atombios_encoders.h"
-#include "amdgpu_pll.h"
-#include "amdgpu_connectors.h"
-#include "dce_v11_0.h"
-
-#include "dce/dce_11_0_d.h"
-#include "dce/dce_11_0_sh_mask.h"
-#include "dce/dce_11_0_enum.h"
-#include "oss/oss_3_0_d.h"
-#include "oss/oss_3_0_sh_mask.h"
-#include "gmc/gmc_8_1_d.h"
-#include "gmc/gmc_8_1_sh_mask.h"
-
-static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
-static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
-
-static const u32 crtc_offsets[] =
-{
- CRTC0_REGISTER_OFFSET,
- CRTC1_REGISTER_OFFSET,
- CRTC2_REGISTER_OFFSET,
- CRTC3_REGISTER_OFFSET,
- CRTC4_REGISTER_OFFSET,
- CRTC5_REGISTER_OFFSET,
- CRTC6_REGISTER_OFFSET
-};
-
-static const u32 hpd_offsets[] =
-{
- HPD0_REGISTER_OFFSET,
- HPD1_REGISTER_OFFSET,
- HPD2_REGISTER_OFFSET,
- HPD3_REGISTER_OFFSET,
- HPD4_REGISTER_OFFSET,
- HPD5_REGISTER_OFFSET
-};
-
-static const uint32_t dig_offsets[] = {
- DIG0_REGISTER_OFFSET,
- DIG1_REGISTER_OFFSET,
- DIG2_REGISTER_OFFSET,
- DIG3_REGISTER_OFFSET,
- DIG4_REGISTER_OFFSET,
- DIG5_REGISTER_OFFSET,
- DIG6_REGISTER_OFFSET,
- DIG7_REGISTER_OFFSET,
- DIG8_REGISTER_OFFSET
-};
-
-static const struct {
- uint32_t reg;
- uint32_t vblank;
- uint32_t vline;
- uint32_t hpd;
-
-} interrupt_status_offsets[] = { {
- .reg = mmDISP_INTERRUPT_STATUS,
- .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
-}, {
- .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
- .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
- .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
- .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
-} };
-
-static const u32 cz_golden_settings_a11[] =
-{
- mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
- mmFBC_MISC, 0x1f311fff, 0x14300000,
-};
-
-static const u32 cz_mgcg_cgcg_init[] =
-{
- mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
- mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
-};
-
-static const u32 stoney_golden_settings_a11[] =
-{
- mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
- mmFBC_MISC, 0x1f311fff, 0x14302000,
-};
-
-static const u32 polaris11_golden_settings_a11[] =
-{
- mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
- mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
- mmFBC_DEBUG1, 0xffffffff, 0x00000008,
- mmFBC_MISC, 0x9f313fff, 0x14302008,
- mmHDMI_CONTROL, 0x313f031f, 0x00000011,
-};
-
-static const u32 polaris10_golden_settings_a11[] =
-{
- mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
- mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
- mmFBC_MISC, 0x9f313fff, 0x14302008,
- mmHDMI_CONTROL, 0x313f031f, 0x00000011,
-};
-
-static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
-{
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- amdgpu_program_register_sequence(adev,
- cz_mgcg_cgcg_init,
- (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
- amdgpu_program_register_sequence(adev,
- cz_golden_settings_a11,
- (const u32)ARRAY_SIZE(cz_golden_settings_a11));
- break;
- case CHIP_STONEY:
- amdgpu_program_register_sequence(adev,
- stoney_golden_settings_a11,
- (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- amdgpu_program_register_sequence(adev,
- polaris11_golden_settings_a11,
- (const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
- break;
- case CHIP_POLARIS10:
- amdgpu_program_register_sequence(adev,
- polaris10_golden_settings_a11,
- (const u32)ARRAY_SIZE(polaris10_golden_settings_a11));
- break;
- default:
- break;
- }
-}
-
-static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev,
- u32 block_offset, u32 reg)
-{
- unsigned long flags;
- u32 r;
-
- spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
- r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
- spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
-
- return r;
-}
-
-static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
- u32 block_offset, u32 reg, u32 v)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
- WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
- spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
-}
-
-static bool dce_v11_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
-{
- if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
- CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
- return true;
- else
- return false;
-}
-
-static bool dce_v11_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
-{
- u32 pos1, pos2;
-
- pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
- pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
- if (pos1 != pos2)
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_vblank_wait - vblank wait asic callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc: crtc to wait for vblank on
- *
- * Wait for vblank on the requested crtc (evergreen+).
- */
-static void dce_v11_0_vblank_wait(struct amdgpu_device *adev, int crtc)
-{
- unsigned i = 100;
-
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
- return;
-
- if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
- return;
-
- /* depending on when we hit vblank, we may be close to active; if so,
- * wait for another frame.
- */
- while (dce_v11_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v11_0_is_counter_moving(adev, crtc))
- break;
- }
- }
-
- while (!dce_v11_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v11_0_is_counter_moving(adev, crtc))
- break;
- }
- }
-}
-
-static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
-{
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
- return 0;
- else
- return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
-}
-
-static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
-{
- unsigned i;
-
- /* Enable pflip interrupts */
- for (i = 0; i < adev->mode_info.num_crtc; i++)
- amdgpu_irq_get(adev, &adev->pageflip_irq, i);
-}
-
-static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
-{
- unsigned i;
-
- /* Disable pflip interrupts */
- for (i = 0; i < adev->mode_info.num_crtc; i++)
- amdgpu_irq_put(adev, &adev->pageflip_irq, i);
-}
-
-/**
- * dce_v11_0_page_flip - pageflip callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc_id: crtc to cleanup pageflip on
- * @crtc_base: new address of the crtc (GPU MC address)
- *
- * Triggers the actual pageflip by updating the primary
- * surface base address.
- */
-static void dce_v11_0_page_flip(struct amdgpu_device *adev,
- int crtc_id, u64 crtc_base, bool async)
-{
- struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
- u32 tmp;
-
- /* flip immediate for async, default is vsync */
- tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
- WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* update the scanout addresses */
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(crtc_base));
- /* writing to the low address triggers the update */
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- lower_32_bits(crtc_base));
- /* post the write */
- RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
-}
-
-static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
- u32 *vbl, u32 *position)
-{
- if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
- return -EINVAL;
-
- *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
- *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
- return 0;
-}
-
-/**
- * dce_v11_0_hpd_sense - hpd sense callback.
- *
- * @adev: amdgpu_device pointer
- * @hpd: hpd (hotplug detect) pin
- *
- * Checks if a digital monitor is connected (evergreen+).
- * Returns true if connected, false if not connected.
- */
-static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
- enum amdgpu_hpd_id hpd)
-{
- bool connected = false;
-
- if (hpd >= adev->mode_info.num_hpd)
- return connected;
-
- if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
- DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
- connected = true;
-
- return connected;
-}
-
-/**
- * dce_v11_0_hpd_set_polarity - hpd set polarity callback.
- *
- * @adev: amdgpu_device pointer
- * @hpd: hpd (hotplug detect) pin
- *
- * Set the polarity of the hpd pin (evergreen+).
- */
-static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
- enum amdgpu_hpd_id hpd)
-{
- u32 tmp;
- bool connected = dce_v11_0_hpd_sense(adev, hpd);
-
- if (hpd >= adev->mode_info.num_hpd)
- return;
-
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- if (connected)
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
- else
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
-}
-
-/**
- * dce_v11_0_hpd_init - hpd setup callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Setup the hpd pins used by the card (evergreen+).
- * Enable the pin, set the polarity, and enable the hpd interrupts.
- */
-static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev->ddev;
- struct drm_connector *connector;
- u32 tmp;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
- continue;
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
-			/* don't try to enable hpd on eDP or LVDS; this avoids breaking the
-			 * aux dp channel on imac and helps (but does not completely fix)
-			 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
-			 * and also avoids interrupt storms during dpms.
- */
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
- continue;
- }
-
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
- DC_HPD_CONNECT_INT_DELAY,
- AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
- tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
- DC_HPD_DISCONNECT_INT_DELAY,
- AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
- WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
- amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
- }
-}
-
-/**
- * dce_v11_0_hpd_fini - hpd tear down callback.
- *
- * @adev: amdgpu_device pointer
- *
- * Tear down the hpd pins used by the card (evergreen+).
- * Disable the hpd interrupts.
- */
-static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev->ddev;
- struct drm_connector *connector;
- u32 tmp;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
- continue;
-
- tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
- WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
-
- amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
- }
-}
-
-static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
-{
- return mmDC_GPIO_HPD_A;
-}
-
-static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
-{
- u32 crtc_hung = 0;
- u32 crtc_status[6];
- u32 i, j, tmp;
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
- crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- crtc_hung |= (1 << i);
- }
- }
-
- for (j = 0; j < 10; j++) {
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (crtc_hung & (1 << i)) {
- tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
- if (tmp != crtc_status[i])
- crtc_hung &= ~(1 << i);
- }
- }
- if (crtc_hung == 0)
- return false;
- udelay(100);
- }
-
- return true;
-}
-
-static void dce_v11_0_stop_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 crtc_enabled, tmp;
- int i;
-
- save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
- /* disable VGA render */
- tmp = RREG32(mmVGA_RENDER_CONTROL);
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32(mmVGA_RENDER_CONTROL, tmp);
-
- /* blank the display controllers */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
- CRTC_CONTROL, CRTC_MASTER_EN);
- if (crtc_enabled) {
-#if 1
- save->crtc_enabled[i] = true;
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
- /*it is correct only for RGB ; black is 0*/
- WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- }
-#else
- /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- save->crtc_enabled[i] = false;
- /* ***** */
-#endif
- } else {
- save->crtc_enabled[i] = false;
- }
- }
-}
-
-static void dce_v11_0_resume_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 tmp;
- int i;
-
- /* update crtc base addresses */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
-
- if (save->crtc_enabled[i]) {
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- }
- }
-
- WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
- WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
-
- /* Unlock vga access */
- WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
- mdelay(1);
- WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-}
-
-static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
- bool render)
-{
- u32 tmp;
-
- /* Lockout access through VGA aperture*/
- tmp = RREG32(mmVGA_HDP_CONTROL);
- if (render)
- tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
- else
- tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
- WREG32(mmVGA_HDP_CONTROL, tmp);
-
- /* disable VGA render */
- tmp = RREG32(mmVGA_RENDER_CONTROL);
- if (render)
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
- else
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32(mmVGA_RENDER_CONTROL, tmp);
-}
-
-static int dce_v11_0_get_num_crtc(struct amdgpu_device *adev)
-{
- int num_crtc = 0;
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- num_crtc = 3;
- break;
- case CHIP_STONEY:
- num_crtc = 2;
- break;
- case CHIP_POLARIS10:
- num_crtc = 6;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- num_crtc = 5;
- break;
- default:
- num_crtc = 0;
- }
- return num_crtc;
-}
-
-void dce_v11_0_disable_dce(struct amdgpu_device *adev)
-{
-	/* Disable VGA render and any enabled crtcs, if the ASIC has a DCE engine */
- if (amdgpu_atombios_has_dce_engine_info(adev)) {
- u32 tmp;
- int crtc_enabled, i;
-
- dce_v11_0_set_vga_render_state(adev, false);
-
- /*Disable crtc*/
- for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) {
- crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
- CRTC_CONTROL, CRTC_MASTER_EN);
- if (crtc_enabled) {
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- }
- }
-}
-
-static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
- int bpc = 0;
- u32 tmp = 0;
- enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
-
- if (connector) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- bpc = amdgpu_connector_get_monitor_bpc(connector);
- dither = amdgpu_connector->dither;
- }
-
- /* LVDS/eDP FMT is set up by atom */
- if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
- return;
-
- /* not needed for analog */
- if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
- (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
- return;
-
- if (bpc == 0)
- return;
-
- switch (bpc) {
- case 6:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
- }
- break;
- case 8:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
- }
- break;
- case 10:
- if (dither == AMDGPU_FMT_DITHER_ENABLE) {
- /* XXX sort out optimal dither settings */
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
- } else {
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
- tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
- }
- break;
- default:
- /* not needed */
- break;
- }
-
- WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-
-/* display watermark setup */
-/**
- * dce_v11_0_line_buffer_adjust - Set up the line buffer
- *
- * @adev: amdgpu_device pointer
- * @amdgpu_crtc: the selected display controller
- * @mode: the current display mode on the selected display
- * controller
- *
- * Set up the line buffer allocation for
- * the selected display controller (CIK).
- * Returns the line buffer size in pixels.
- */
-static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
- struct amdgpu_crtc *amdgpu_crtc,
- struct drm_display_mode *mode)
-{
- u32 tmp, buffer_alloc, i, mem_cfg;
- u32 pipe_offset = amdgpu_crtc->crtc_id;
- /*
- * Line Buffer Setup
- * There are 6 line buffers, one for each display controller.
- * There are 3 partitions per LB. Select the number of partitions
- * to enable based on the display width. For display widths larger
- * than 4096, you need to use 2 display controllers and combine
- * them using the stereo blender.
- */
- if (amdgpu_crtc->base.enabled && mode) {
- if (mode->crtc_hdisplay < 1920) {
- mem_cfg = 1;
- buffer_alloc = 2;
- } else if (mode->crtc_hdisplay < 2560) {
- mem_cfg = 2;
- buffer_alloc = 2;
- } else if (mode->crtc_hdisplay < 4096) {
- mem_cfg = 0;
- buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
- } else {
- DRM_DEBUG_KMS("Mode too big for LB!\n");
- mem_cfg = 0;
- buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
- }
- } else {
- mem_cfg = 1;
- buffer_alloc = 0;
- }
-
- tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
- WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
- tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
- WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
-
- for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
- if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
- break;
- udelay(1);
- }
-
- if (amdgpu_crtc->base.enabled && mode) {
- switch (mem_cfg) {
- case 0:
- default:
- return 4096 * 2;
- case 1:
- return 1920 * 2;
- case 2:
- return 2560 * 2;
- }
- }
-
- /* controller not enabled, so no lb used */
- return 0;
-}
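As a rough illustration of the bucket selection above (the mode widths are illustrative, not tied to any particular board):

	/* illustrative only: a 1920-wide mode is not < 1920 but is < 2560,
	 * so mem_cfg = 2 and the function reports 2560 * 2 pixels of LB;
	 * a 2560-wide mode falls through to the < 4096 bucket, so
	 * mem_cfg = 0 and 4096 * 2 pixels are reported instead.
	 */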
-
-/**
- * cik_get_number_of_dram_channels - get the number of dram channels
- *
- * @adev: amdgpu_device pointer
- *
- * Look up the number of video ram channels (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the number of dram channels
- */
-static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
-{
- u32 tmp = RREG32(mmMC_SHARED_CHMAP);
-
- switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
- case 0:
- default:
- return 1;
- case 1:
- return 2;
- case 2:
- return 4;
- case 3:
- return 8;
- case 4:
- return 3;
- case 5:
- return 6;
- case 6:
- return 10;
- case 7:
- return 12;
- case 8:
- return 16;
- }
-}
-
-struct dce10_wm_params {
- u32 dram_channels; /* number of dram channels */
- u32 yclk; /* bandwidth per dram data pin in kHz */
- u32 sclk; /* engine clock in kHz */
- u32 disp_clk; /* display clock in kHz */
- u32 src_width; /* viewport width */
- u32 active_time; /* active display time in ns */
- u32 blank_time; /* blank time in ns */
- bool interlaced; /* mode is interlaced */
- fixed20_12 vsc; /* vertical scale ratio */
- u32 num_heads; /* number of active crtcs */
- u32 bytes_per_pixel; /* bytes per pixel display + overlay */
- u32 lb_size; /* line buffer allocated to pipe */
- u32 vtaps; /* vertical scaler taps */
-};
-
-/**
- * dce_v11_0_dram_bandwidth - get the dram bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the raw dram bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dram bandwidth in MBytes/s
- */
-static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate raw DRAM Bandwidth */
- fixed20_12 dram_efficiency; /* 0.7 */
- fixed20_12 yclk, dram_channels, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- yclk.full = dfixed_const(wm->yclk);
- yclk.full = dfixed_div(yclk, a);
- dram_channels.full = dfixed_const(wm->dram_channels * 4);
- a.full = dfixed_const(10);
- dram_efficiency.full = dfixed_const(7);
- dram_efficiency.full = dfixed_div(dram_efficiency, a);
- bandwidth.full = dfixed_mul(dram_channels, yclk);
- bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
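A quick sanity check of the fixed-point math above, using made-up numbers rather than values from real hardware:

	/* illustrative only: yclk = 1000000 (1 GHz per pin, in kHz) and
	 * 2 DRAM channels reduce to (1000000 / 1000) * (2 * 4) * 0.7,
	 * so dfixed_trunc(bandwidth) would return 5600 (MBytes/s).
	 */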
-
-/**
- * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display
- *
- * @wm: watermark calculation data
- *
- * Calculate the dram bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dram bandwidth for display in MBytes/s
- */
-static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
-{
- /* Calculate DRAM Bandwidth and the part allocated to display. */
- fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
- fixed20_12 yclk, dram_channels, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- yclk.full = dfixed_const(wm->yclk);
- yclk.full = dfixed_div(yclk, a);
- dram_channels.full = dfixed_const(wm->dram_channels * 4);
- a.full = dfixed_const(10);
-	disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
- disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
- bandwidth.full = dfixed_mul(dram_channels, yclk);
- bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_data_return_bandwidth - get the data return bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the data return bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the data return bandwidth in MBytes/s
- */
-static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the display Data return Bandwidth */
- fixed20_12 return_efficiency; /* 0.8 */
- fixed20_12 sclk, bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- sclk.full = dfixed_const(wm->sclk);
- sclk.full = dfixed_div(sclk, a);
- a.full = dfixed_const(10);
- return_efficiency.full = dfixed_const(8);
- return_efficiency.full = dfixed_div(return_efficiency, a);
- a.full = dfixed_const(32);
- bandwidth.full = dfixed_mul(a, sclk);
- bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the dmif bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the dmif bandwidth in MBytes/s
- */
-static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the DMIF Request Bandwidth */
- fixed20_12 disp_clk_request_efficiency; /* 0.8 */
- fixed20_12 disp_clk, bandwidth;
- fixed20_12 a, b;
-
- a.full = dfixed_const(1000);
- disp_clk.full = dfixed_const(wm->disp_clk);
- disp_clk.full = dfixed_div(disp_clk, a);
- a.full = dfixed_const(32);
- b.full = dfixed_mul(a, disp_clk);
-
- a.full = dfixed_const(10);
- disp_clk_request_efficiency.full = dfixed_const(8);
- disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
-
- bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
-
- return dfixed_trunc(bandwidth);
-}
-
-/**
- * dce_v11_0_available_bandwidth - get the min available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the min available bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the min available bandwidth in MBytes/s
- */
-static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm)
-{
-	/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
- u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm);
- u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm);
- u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm);
-
- return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
-}
-
-/**
- * dce_v11_0_average_bandwidth - get the average available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Calculate the average available bandwidth used for display (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the average available bandwidth in MBytes/s
- */
-static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm)
-{
- /* Calculate the display mode Average Bandwidth
- * DisplayMode should contain the source and destination dimensions,
- * timing, etc.
- */
- fixed20_12 bpp;
- fixed20_12 line_time;
- fixed20_12 src_width;
- fixed20_12 bandwidth;
- fixed20_12 a;
-
- a.full = dfixed_const(1000);
- line_time.full = dfixed_const(wm->active_time + wm->blank_time);
- line_time.full = dfixed_div(line_time, a);
- bpp.full = dfixed_const(wm->bytes_per_pixel);
- src_width.full = dfixed_const(wm->src_width);
- bandwidth.full = dfixed_mul(src_width, bpp);
- bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
- bandwidth.full = dfixed_div(bandwidth, line_time);
-
- return dfixed_trunc(bandwidth);
-}
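As a rough worked example of the average-bandwidth formula (the timing numbers are approximate and purely illustrative):

	/* illustrative only: a 1080p60 timing has a line time of about
	 * 14815 ns (2200 pixels / 148.5 MHz); with src_width = 1920,
	 * 4 bytes per pixel and vsc = 1 this works out to roughly
	 * 1920 * 4 / 14.815 ~= 518 MBytes/s of average bandwidth.
	 */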
-
-/**
- * dce_v11_0_latency_watermark - get the latency watermark
- *
- * @wm: watermark calculation data
- *
- * Calculate the latency watermark (CIK).
- * Used for display watermark bandwidth calculations
- * Returns the latency watermark in ns
- */
-static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
-{
- /* First calculate the latency in ns */
- u32 mc_latency = 2000; /* 2000 ns. */
- u32 available_bandwidth = dce_v11_0_available_bandwidth(wm);
- u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
- u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
- u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
- u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
- (wm->num_heads * cursor_line_pair_return_time);
- u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
- u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
- u32 tmp, dmif_size = 12288;
- fixed20_12 a, b, c;
-
- if (wm->num_heads == 0)
- return 0;
-
- a.full = dfixed_const(2);
- b.full = dfixed_const(1);
- if ((wm->vsc.full > a.full) ||
- ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
- (wm->vtaps >= 5) ||
- ((wm->vsc.full >= a.full) && wm->interlaced))
- max_src_lines_per_dst_line = 4;
- else
- max_src_lines_per_dst_line = 2;
-
- a.full = dfixed_const(available_bandwidth);
- b.full = dfixed_const(wm->num_heads);
- a.full = dfixed_div(a, b);
- tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
- tmp = min(dfixed_trunc(a), tmp);
-
- lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
-
- a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
- b.full = dfixed_const(1000);
- c.full = dfixed_const(lb_fill_bw);
- b.full = dfixed_div(c, b);
- a.full = dfixed_div(a, b);
- line_fill_time = dfixed_trunc(a);
-
- if (line_fill_time < wm->active_time)
- return latency;
- else
- return latency + (line_fill_time - wm->active_time);
-
-}
-
-/**
- * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check
- * average and available dram bandwidth
- *
- * @wm: watermark calculation data
- *
- * Check if the display average bandwidth fits in the display
- * dram bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
-{
- if (dce_v11_0_average_bandwidth(wm) <=
- (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads))
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_average_bandwidth_vs_available_bandwidth - check
- * average and available bandwidth
- *
- * @wm: watermark calculation data
- *
- * Check if the display average bandwidth fits in the display
- * available bandwidth (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
-{
- if (dce_v11_0_average_bandwidth(wm) <=
- (dce_v11_0_available_bandwidth(wm) / wm->num_heads))
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_check_latency_hiding - check latency hiding
- *
- * @wm: watermark calculation data
- *
- * Check latency hiding (CIK).
- * Used for display watermark bandwidth calculations
- * Returns true if the display fits, false if not.
- */
-static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm)
-{
- u32 lb_partitions = wm->lb_size / wm->src_width;
- u32 line_time = wm->active_time + wm->blank_time;
- u32 latency_tolerant_lines;
- u32 latency_hiding;
- fixed20_12 a;
-
- a.full = dfixed_const(1);
- if (wm->vsc.full > a.full)
- latency_tolerant_lines = 1;
- else {
- if (lb_partitions <= (wm->vtaps + 1))
- latency_tolerant_lines = 1;
- else
- latency_tolerant_lines = 2;
- }
-
- latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
-
- if (dce_v11_0_latency_watermark(wm) <= latency_hiding)
- return true;
- else
- return false;
-}
-
-/**
- * dce_v11_0_program_watermarks - program display watermarks
- *
- * @adev: amdgpu_device pointer
- * @amdgpu_crtc: the selected display controller
- * @lb_size: line buffer size
- * @num_heads: number of display controllers in use
- *
- * Calculate and program the display watermarks for the
- * selected display controller (CIK).
- */
-static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
- struct amdgpu_crtc *amdgpu_crtc,
- u32 lb_size, u32 num_heads)
-{
- struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
- struct dce10_wm_params wm_low, wm_high;
- u32 active_time;
- u32 line_time = 0;
- u32 latency_watermark_a = 0, latency_watermark_b = 0;
- u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
-
- if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
-
- /* watermark for high clocks */
- if (adev->pm.dpm_enabled) {
- wm_high.yclk =
- amdgpu_dpm_get_mclk(adev, false) * 10;
- wm_high.sclk =
- amdgpu_dpm_get_sclk(adev, false) * 10;
- } else {
- wm_high.yclk = adev->pm.current_mclk * 10;
- wm_high.sclk = adev->pm.current_sclk * 10;
- }
-
- wm_high.disp_clk = mode->clock;
- wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = active_time;
- wm_high.blank_time = line_time - wm_high.active_time;
- wm_high.interlaced = false;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_high.interlaced = true;
- wm_high.vsc = amdgpu_crtc->vsc;
- wm_high.vtaps = 1;
- if (amdgpu_crtc->rmx_type != RMX_OFF)
- wm_high.vtaps = 2;
- wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_high.lb_size = lb_size;
- wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
- wm_high.num_heads = num_heads;
-
- /* set for high clocks */
- latency_watermark_a = min(dce_v11_0_latency_watermark(&wm_high), (u32)65535);
-
- /* possibly force display priority to high */
- /* should really do this at mode validation time... */
- if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
- !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
- !dce_v11_0_check_latency_hiding(&wm_high) ||
- (adev->mode_info.disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority to high\n");
- }
-
- /* watermark for low clocks */
- if (adev->pm.dpm_enabled) {
- wm_low.yclk =
- amdgpu_dpm_get_mclk(adev, true) * 10;
- wm_low.sclk =
- amdgpu_dpm_get_sclk(adev, true) * 10;
- } else {
- wm_low.yclk = adev->pm.current_mclk * 10;
- wm_low.sclk = adev->pm.current_sclk * 10;
- }
-
- wm_low.disp_clk = mode->clock;
- wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = active_time;
- wm_low.blank_time = line_time - wm_low.active_time;
- wm_low.interlaced = false;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm_low.interlaced = true;
- wm_low.vsc = amdgpu_crtc->vsc;
- wm_low.vtaps = 1;
- if (amdgpu_crtc->rmx_type != RMX_OFF)
- wm_low.vtaps = 2;
- wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm_low.lb_size = lb_size;
- wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
- wm_low.num_heads = num_heads;
-
- /* set for low clocks */
- latency_watermark_b = min(dce_v11_0_latency_watermark(&wm_low), (u32)65535);
-
- /* possibly force display priority to high */
- /* should really do this at mode validation time... */
- if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
- !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
- !dce_v11_0_check_latency_hiding(&wm_low) ||
- (adev->mode_info.disp_priority == 2)) {
- DRM_DEBUG_KMS("force priority to high\n");
- }
- lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
- }
-
- /* select wm A */
- wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
- WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* select wm B */
- tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
- tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
- WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
- /* restore original selection */
- WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
-
- /* save values for DPM */
- amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
- /* Save number of lines the linebuffer leads before the scanout */
- amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
-}
-
-/**
- * dce_v11_0_bandwidth_update - program display watermarks
- *
- * @adev: amdgpu_device pointer
- *
- * Calculate and program the display watermarks and line
- * buffer allocation (CIK).
- */
-static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
-{
- struct drm_display_mode *mode = NULL;
- u32 num_heads = 0, lb_size;
- int i;
-
- amdgpu_update_display_priority(adev);
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->mode_info.crtcs[i]->base.enabled)
- num_heads++;
- }
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- mode = &adev->mode_info.crtcs[i]->base.mode;
- lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
- dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i],
- lb_size, num_heads);
- }
-}
-
-static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev)
-{
- int i;
- u32 offset, tmp;
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- offset = adev->mode_info.audio.pin[i].offset;
- tmp = RREG32_AUDIO_ENDPT(offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
- if (((tmp &
- AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
- AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
- adev->mode_info.audio.pin[i].connected = false;
- else
- adev->mode_info.audio.pin[i].connected = true;
- }
-}
-
-static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev)
-{
- int i;
-
- dce_v11_0_audio_get_connected_pins(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- if (adev->mode_info.audio.pin[i].connected)
- return &adev->mode_info.audio.pin[i];
- }
- DRM_ERROR("No connected audio pins found!\n");
- return NULL;
-}
-
-static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
-{
- struct amdgpu_device *adev = encoder->dev->dev_private;
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- u32 tmp;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
- WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
-}
-
-static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct amdgpu_device *adev = encoder->dev->dev_private;
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct amdgpu_connector *amdgpu_connector = NULL;
- u32 tmp;
- int interlace = 0;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- interlace = 1;
- if (connector->latency_present[interlace]) {
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- VIDEO_LIPSYNC, connector->video_latency[interlace]);
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- AUDIO_LIPSYNC, connector->audio_latency[interlace]);
- } else {
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- VIDEO_LIPSYNC, 0);
- tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
- AUDIO_LIPSYNC, 0);
- }
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
-}
-
-static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
-{
- struct amdgpu_device *adev = encoder->dev->dev_private;
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct amdgpu_connector *amdgpu_connector = NULL;
- u32 tmp;
- u8 *sadb = NULL;
- int sad_count;
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
- if (sad_count < 0) {
- DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
- sad_count = 0;
- }
-
- /* program the speaker allocation */
- tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- DP_CONNECTION, 0);
- /* set HDMI mode */
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- HDMI_CONNECTION, 1);
- if (sad_count)
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- SPEAKER_ALLOCATION, sadb[0]);
- else
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
- SPEAKER_ALLOCATION, 5); /* stereo */
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
- ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
-
- kfree(sadb);
-}
-
-static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
-{
- struct amdgpu_device *adev = encoder->dev->dev_private;
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector;
- struct amdgpu_connector *amdgpu_connector = NULL;
- struct cea_sad *sads;
- int i, sad_count;
-
- static const u16 eld_reg_to_type[][2] = {
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
- { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
- };
-
- if (!dig || !dig->afmt || !dig->afmt->pin)
- return;
-
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- amdgpu_connector = to_amdgpu_connector(connector);
- break;
- }
- }
-
- if (!amdgpu_connector) {
- DRM_ERROR("Couldn't find encoder's connector\n");
- return;
- }
-
- sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
- DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
- return;
- }
- BUG_ON(!sads);
-
- for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
- u32 tmp = 0;
- u8 stereo_freqs = 0;
- int max_channels = -1;
- int j;
-
- for (j = 0; j < sad_count; j++) {
- struct cea_sad *sad = &sads[j];
-
- if (sad->format == eld_reg_to_type[i][1]) {
- if (sad->channels > max_channels) {
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- MAX_CHANNELS, sad->channels);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- DESCRIPTOR_BYTE_2, sad->byte2);
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES, sad->freq);
- max_channels = sad->channels;
- }
-
- if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
- stereo_freqs |= sad->freq;
- else
- break;
- }
- }
-
- tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
- SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
- WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
- }
-
- kfree(sads);
-}
-
-static void dce_v11_0_audio_enable(struct amdgpu_device *adev,
- struct amdgpu_audio_pin *pin,
- bool enable)
-{
- if (!pin)
- return;
-
- WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
- enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
-}
-
-static const u32 pin_offsets[] =
-{
- AUD0_REGISTER_OFFSET,
- AUD1_REGISTER_OFFSET,
- AUD2_REGISTER_OFFSET,
- AUD3_REGISTER_OFFSET,
- AUD4_REGISTER_OFFSET,
- AUD5_REGISTER_OFFSET,
- AUD6_REGISTER_OFFSET,
- AUD7_REGISTER_OFFSET,
-};
-
-static int dce_v11_0_audio_init(struct amdgpu_device *adev)
-{
- int i;
-
- if (!amdgpu_audio)
- return 0;
-
- adev->mode_info.audio.enabled = true;
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- case CHIP_STONEY:
- adev->mode_info.audio.num_pins = 7;
- break;
- case CHIP_POLARIS10:
- adev->mode_info.audio.num_pins = 8;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- adev->mode_info.audio.num_pins = 6;
- break;
- default:
- return -EINVAL;
- }
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- adev->mode_info.audio.pin[i].channels = -1;
- adev->mode_info.audio.pin[i].rate = -1;
- adev->mode_info.audio.pin[i].bits_per_sample = -1;
- adev->mode_info.audio.pin[i].status_bits = 0;
- adev->mode_info.audio.pin[i].category_code = 0;
- adev->mode_info.audio.pin[i].connected = false;
- adev->mode_info.audio.pin[i].offset = pin_offsets[i];
- adev->mode_info.audio.pin[i].id = i;
- /* disable audio. it will be set up later */
- /* XXX remove once we switch to ip funcs */
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- return 0;
-}
-
-static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
-{
- int i;
-
- if (!amdgpu_audio)
- return;
-
- if (!adev->mode_info.audio.enabled)
- return;
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
- adev->mode_info.audio.enabled = false;
-}
-
-/*
- * update the N and CTS parameters for a given pixel clock rate
- */
-static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- u32 tmp;
-
- tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
- WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
- WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
- WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
- WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
- WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
- tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
- WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
-
-}
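For context, these N/CTS registers feed the HDMI audio clock regeneration scheme; the numbers below are the spec's recommended defaults, not values read from this driver:

	/* sink-side audio clock recovery: 128 * fs = TMDS_clock * N / CTS
	 * e.g. fs = 48 kHz, TMDS = 148.5 MHz, N = 6144  ->  CTS = 148500,
	 * since 148500000 * 6144 / (128 * 48000) = 148500.
	 */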
-
-/*
- * build a HDMI Video Info Frame
- */
-static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
- void *buffer, size_t size)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- uint8_t *frame = buffer + 3;
- uint8_t *header = buffer;
-
- WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
- frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
- WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
- frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
- WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
- frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
- WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
- frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
-}
-
-static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- u32 dto_phase = 24 * 1000;
- u32 dto_modulo = clock;
- u32 tmp;
-
- if (!dig || !dig->afmt)
- return;
-
- /* XXX two dtos; generally use dto0 for hdmi */
-	/* Express [24MHz / target pixel clock] as an exact rational
-	 * number (a ratio of two integers): DCCG_AUDIO_DTOx_PHASE is the
-	 * numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
-	 */
- tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
- tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
- amdgpu_crtc->crtc_id);
- WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
- WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
- WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
-}
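A worked example of the phase/module ratio programmed above (the pixel clock value is illustrative):

	/* illustrative only: for a 148500 kHz (1080p60) pixel clock,
	 * dto_phase = 24 * 1000 = 24000 and dto_modulo = 148500, i.e.
	 * 24 MHz / 148.5 MHz ~= 0.1616 expressed exactly as two integers.
	 */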
-
-/*
- * update the info frames with the data from the current display mode
- */
-static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
- u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
- struct hdmi_avi_infoframe frame;
- ssize_t err;
- u32 tmp;
- int bpc = 8;
-
- if (!dig || !dig->afmt)
- return;
-
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (!dig->afmt->enabled)
- return;
-
- /* hdmi deep color mode general control packets setup, if bpc > 8 */
- if (encoder->crtc) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
- bpc = amdgpu_crtc->bpc;
- }
-
- /* disable audio prior to setting up hw */
- dig->afmt->pin = dce_v11_0_audio_get_pin(adev);
- dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
-
- dce_v11_0_audio_set_dto(encoder, mode->clock);
-
- tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
- WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */
-
- WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);
-
- tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
- switch (bpc) {
- case 0:
- case 6:
- case 8:
- case 16:
- default:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
- DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
- connector->name, bpc);
- break;
- case 10:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
- DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
- connector->name);
- break;
- case 12:
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
- tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
- DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
- connector->name);
- break;
- }
- WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
- tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
- WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* enable audio info frames (frames won't be set until audio is enabled) */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
- WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
- WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
- /* anything other than 0 */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
- WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
-
- WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */
-
- tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* set the default audio delay */
- tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
-	/* should be sufficient for all audio modes and small enough for all hblanks */
- tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
- WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* allow 60958 channel status fields to be updated */
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
- if (bpc > 8)
- /* clear SW CTS value */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
- else
- /* select SW CTS value */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
-	/* allow hw to send ACR packets when required */
- tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
- WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- dce_v11_0_afmt_update_ACR(encoder, mode->clock);
-
- tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
- WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
- WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
- tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
- WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
-
- dce_v11_0_audio_write_speaker_allocation(encoder);
-
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
- (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
-
- dce_v11_0_afmt_audio_select_pin(encoder);
- dce_v11_0_audio_write_sad_regs(encoder);
- dce_v11_0_audio_write_latency_fields(encoder, mode);
-
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
- if (err < 0) {
- DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
- return;
- }
-
- err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
- if (err < 0) {
- DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
- return;
- }
-
- dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
- /* enable AVI info frames */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
- /* required for audio info values to be updated */
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
- WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
- tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
- WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
-
- tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
- /* send audio packets */
- tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
- WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
-
- WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
- WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
- WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
- WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);
-
-	/* enable audio after setting up hw */
- dce_v11_0_audio_enable(adev, dig->afmt->pin, true);
-}
-
-static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- if (!dig || !dig->afmt)
- return;
-
- /* Silent, r600_hdmi_enable will raise WARN for us */
- if (enable && dig->afmt->enabled)
- return;
- if (!enable && !dig->afmt->enabled)
- return;
-
- if (!enable && dig->afmt->pin) {
- dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
- dig->afmt->pin = NULL;
- }
-
- dig->afmt->enabled = enable;
-
- DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
- enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
-}
-
-static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->mode_info.num_dig; i++)
- adev->mode_info.afmt[i] = NULL;
-
- /* DCE11 has audio blocks tied to DIG encoders */
- for (i = 0; i < adev->mode_info.num_dig; i++) {
- adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
- if (adev->mode_info.afmt[i]) {
- adev->mode_info.afmt[i]->offset = dig_offsets[i];
- adev->mode_info.afmt[i]->id = i;
- } else {
- int j;
- for (j = 0; j < i; j++) {
- kfree(adev->mode_info.afmt[j]);
- adev->mode_info.afmt[j] = NULL;
- }
- return -ENOMEM;
- }
- }
- return 0;
-}
-
-static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0; i < adev->mode_info.num_dig; i++) {
- kfree(adev->mode_info.afmt[i]);
- adev->mode_info.afmt[i] = NULL;
- }
-}
-
-static const u32 vga_control_regs[6] =
-{
- mmD1VGA_CONTROL,
- mmD2VGA_CONTROL,
- mmD3VGA_CONTROL,
- mmD4VGA_CONTROL,
- mmD5VGA_CONTROL,
- mmD6VGA_CONTROL,
-};
-
-static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
- u32 vga_control;
-
- vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
- if (enable)
- WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
- else
- WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
-}
-
-static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
-
- if (enable)
- WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
- else
- WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
-}
-
-static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, int atomic)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_framebuffer *amdgpu_fb;
- struct drm_framebuffer *target_fb;
- struct drm_gem_object *obj;
- struct amdgpu_bo *abo;
- uint64_t fb_location, tiling_flags;
- uint32_t fb_format, fb_pitch_pixels;
- u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
- u32 pipe_config;
- u32 tmp, viewport_w, viewport_h;
- int r;
- bool bypass_lut = false;
- struct drm_format_name_buf format_name;
-
- /* no fb bound */
- if (!atomic && !crtc->primary->fb) {
- DRM_DEBUG_KMS("No FB bound\n");
- return 0;
- }
-
- if (atomic) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
- target_fb = fb;
- } else {
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- target_fb = crtc->primary->fb;
- }
-
- /* If atomic, assume fb object is pinned & idle & fenced and
- * just update base pointers
- */
- obj = amdgpu_fb->obj;
- abo = gem_to_amdgpu_bo(obj);
- r = amdgpu_bo_reserve(abo, false);
- if (unlikely(r != 0))
- return r;
-
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
- if (unlikely(r != 0)) {
- amdgpu_bo_unreserve(abo);
- return -EINVAL;
- }
- }
-
- amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
- amdgpu_bo_unreserve(abo);
-
- pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
-
- switch (target_fb->format->format) {
- case DRM_FORMAT_C8:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
- break;
- case DRM_FORMAT_XRGB4444:
- case DRM_FORMAT_ARGB4444:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_XRGB1555:
- case DRM_FORMAT_ARGB1555:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_BGRX5551:
- case DRM_FORMAT_BGRA5551:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_RGB565:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN16);
-#endif
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
-		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
- bypass_lut = true;
- break;
- case DRM_FORMAT_BGRX1010102:
- case DRM_FORMAT_BGRA1010102:
- fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
-#ifdef __BIG_ENDIAN
- fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
- ENDIAN_8IN32);
-#endif
-		/* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
- bypass_lut = true;
- break;
- default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
- return -EINVAL;
- }
-
- if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
- unsigned bankw, bankh, mtaspect, tile_split, num_banks;
-
- bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
- bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
- mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
- tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
- num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
-
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
- ARRAY_2D_TILED_THIN1);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
- tile_split);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
- mtaspect);
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
- ADDR_SURF_MICRO_TILING_DISPLAY);
- } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
- ARRAY_1D_TILED_THIN1);
- }
-
- fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
- pipe_config);
-
- dce_v11_0_vga_enable(crtc, false);
-
- /* Make sure surface address is updated at vertical blank rather than
- * horizontal blank
- */
- tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
- GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
- WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(fb_location));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(fb_location));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
- WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
- WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
-
- /*
-	 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
- * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
- * retain the full precision throughout the pipeline.
- */
- tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
- if (bypass_lut)
- tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
- else
- tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
- WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);
-
- if (bypass_lut)
- DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
-
- WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
- WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
-
- fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
- WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
-
- dce_v11_0_grph_enable(crtc, true);
-
- WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
- target_fb->height);
-
- x &= ~3;
- y &= ~1;
- WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
- (x << 16) | y);
- viewport_w = crtc->mode.hdisplay;
- viewport_h = (crtc->mode.vdisplay + 1) & ~1;
- WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
- (viewport_w << 16) | viewport_h);
-
- /* set pageflip to happen anywhere in vblank interval */
- WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
-
- if (!atomic && fb && fb != crtc->primary->fb) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r != 0))
- return r;
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
-
- /* Bytes per pixel may have changed */
- dce_v11_0_bandwidth_update(adev);
-
- return 0;
-}
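A small illustration of the pitch conversion done in this function (the framebuffer numbers are assumed, not queried from hardware):

	/* illustrative only: a 1920-wide XRGB8888 fb typically has
	 * pitches[0] = 1920 * 4 = 7680 bytes and cpp[0] = 4, so
	 * fb_pitch_pixels = 7680 / 4 = 1920 is written to GRPH_PITCH.
	 */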
-
-static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
- struct drm_display_mode *mode)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- u32 tmp;
-
- tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
- else
- tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
- WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
- int i;
- u32 tmp;
-
- DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
-
- tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
- WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
- WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
- WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
-
- WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
-
- WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
- WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
-
- WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
- WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
-
- WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
- for (i = 0; i < 256; i++) {
- WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->lut_r[i] << 20) |
- (amdgpu_crtc->lut_g[i] << 10) |
- (amdgpu_crtc->lut_b[i] << 0));
- }
-
- tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
- tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0);
- WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
- WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
- WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
- WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-
- /* XXX match this to the depth of the crtc fmt block, move to modeset? */
- WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
- /* XXX this only needs to be programmed once per crtc at startup,
- * not sure where the best place for it is
- */
- tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
- WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig->linkb)
- return 1;
- else
- return 0;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return 3;
- else
- return 2;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return 5;
- else
- return 4;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- return 6;
- break;
- default:
- DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
- return 0;
- }
-}
-
-/**
- * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc.
- *
- * @crtc: drm crtc
- *
- * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
- * a single PPLL can be used for all DP crtcs/encoders. For non-DP
- * monitors a dedicated PPLL must be used. If a particular board has
- * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
- * as there is no need to program the PLL itself. If we are not able to
- * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
- * avoid messing up an existing monitor.
- *
- * Asic specific PLL information
- *
- * DCE 10.x
- * Tonga
- * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
- * CI
- * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
- *
- */
-static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
- u32 pll_in_use;
- int pll;
-
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12)) {
- struct amdgpu_encoder *amdgpu_encoder =
- to_amdgpu_encoder(amdgpu_crtc->encoder);
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
-
- if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
- return ATOM_DP_DTO;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL1;
- else
- return ATOM_COMBOPHY_PLL0;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL3;
- else
- return ATOM_COMBOPHY_PLL2;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- if (dig->linkb)
- return ATOM_COMBOPHY_PLL5;
- else
- return ATOM_COMBOPHY_PLL4;
- break;
- default:
- DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
- return ATOM_PPLL_INVALID;
- }
- }
-
- if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
- if (adev->clock.dp_extclk)
- /* skip PPLL programming if using ext clock */
- return ATOM_PPLL_INVALID;
- else {
- /* use the same PPLL for all DP monitors */
- pll = amdgpu_pll_get_shared_dp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
- } else {
- /* use the same PPLL for all monitors with the same clock */
- pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
- if (pll != ATOM_PPLL_INVALID)
- return pll;
- }
-
- /* XXX need to determine what plls are available on each DCE11 part */
- pll_in_use = amdgpu_pll_get_use_mask(crtc);
- if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL0)))
- return ATOM_PPLL0;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- } else {
- if (!(pll_in_use & (1 << ATOM_PPLL2)))
- return ATOM_PPLL2;
- if (!(pll_in_use & (1 << ATOM_PPLL1)))
- return ATOM_PPLL1;
- if (!(pll_in_use & (1 << ATOM_PPLL0)))
- return ATOM_PPLL0;
- DRM_ERROR("unable to allocate a PPLL\n");
- return ATOM_PPLL_INVALID;
- }
- return ATOM_PPLL_INVALID;
-}
-
-static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
-{
- struct amdgpu_device *adev = crtc->dev->dev_private;
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- uint32_t cur_lock;
-
- cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
- if (lock)
- cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
- else
- cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
- WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
-}
-
-static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
- u32 tmp;
-
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
- u32 tmp;
-
- WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
- upper_32_bits(amdgpu_crtc->cursor_addr));
- WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- lower_32_bits(amdgpu_crtc->cursor_addr));
-
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
- tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
-}
-
-static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
- int x, int y)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
- int xorigin = 0, yorigin = 0;
-
- amdgpu_crtc->cursor_x = x;
- amdgpu_crtc->cursor_y = y;
-
- /* avivo cursors are offset into the total surface */
- x += crtc->x;
- y += crtc->y;
- DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
-
- if (x < 0) {
- xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
- x = 0;
- }
- if (y < 0) {
- yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
- y = 0;
- }
-
- WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
- WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
-
- return 0;
-}
-
-static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
- int x, int y)
-{
- int ret;
-
- dce_v11_0_lock_cursor(crtc, true);
- ret = dce_v11_0_cursor_move_locked(crtc, x, y);
- dce_v11_0_lock_cursor(crtc, false);
-
- return ret;
-}
-
-static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width,
- uint32_t height,
- int32_t hot_x,
- int32_t hot_y)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_gem_object *obj;
- struct amdgpu_bo *aobj;
- int ret;
-
- if (!handle) {
- /* turn off cursor */
- dce_v11_0_hide_cursor(crtc);
- obj = NULL;
- goto unpin;
- }
-
- if ((width > amdgpu_crtc->max_cursor_width) ||
- (height > amdgpu_crtc->max_cursor_height)) {
- DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
- return -EINVAL;
- }
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj) {
- DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
- return -ENOENT;
- }
-
- aobj = gem_to_amdgpu_bo(obj);
- ret = amdgpu_bo_reserve(aobj, false);
- if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
- return ret;
- }
-
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
- amdgpu_bo_unreserve(aobj);
- if (ret) {
- DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
- return ret;
- }
-
- dce_v11_0_lock_cursor(crtc, true);
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height ||
- hot_x != amdgpu_crtc->cursor_hot_x ||
- hot_y != amdgpu_crtc->cursor_hot_y) {
- int x, y;
-
- x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
- y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
-
- dce_v11_0_cursor_move_locked(crtc, x, y);
-
- amdgpu_crtc->cursor_width = width;
- amdgpu_crtc->cursor_height = height;
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- dce_v11_0_show_cursor(crtc);
- dce_v11_0_lock_cursor(crtc, false);
-
-unpin:
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- ret = amdgpu_bo_reserve(aobj, true);
- if (likely(ret == 0)) {
- amdgpu_bo_unpin(aobj);
- amdgpu_bo_unreserve(aobj);
- }
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
- }
-
- amdgpu_crtc->cursor_bo = obj;
- return 0;
-}
-
-static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- if (amdgpu_crtc->cursor_bo) {
- dce_v11_0_lock_cursor(crtc, true);
-
- dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
- amdgpu_crtc->cursor_y);
-
- dce_v11_0_show_cursor(crtc);
-
- dce_v11_0_lock_cursor(crtc, false);
- }
-}
-
-static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- amdgpu_crtc->lut_r[i] = red[i] >> 6;
- amdgpu_crtc->lut_g[i] = green[i] >> 6;
- amdgpu_crtc->lut_b[i] = blue[i] >> 6;
- }
- dce_v11_0_crtc_load_lut(crtc);
-
- return 0;
-}
-
-static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- kfree(amdgpu_crtc);
-}
-
-static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
- .cursor_set2 = dce_v11_0_crtc_cursor_set2,
- .cursor_move = dce_v11_0_crtc_cursor_move,
- .gamma_set = dce_v11_0_crtc_gamma_set,
- .set_config = amdgpu_crtc_set_config,
- .destroy = dce_v11_0_crtc_destroy,
- .page_flip_target = amdgpu_crtc_page_flip_target,
-};
-
-static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- unsigned type;
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- amdgpu_crtc->enabled = true;
- amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
- dce_v11_0_vga_enable(crtc, true);
- amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
- dce_v11_0_vga_enable(crtc, false);
- /* Make sure VBLANK and PFLIP interrupts are still enabled */
- type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
- amdgpu_irq_update(adev, &adev->crtc_irq, type);
- amdgpu_irq_update(adev, &adev->pageflip_irq, type);
- drm_crtc_vblank_on(crtc);
- dce_v11_0_crtc_load_lut(crtc);
- break;
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- case DRM_MODE_DPMS_OFF:
- drm_crtc_vblank_off(crtc);
- if (amdgpu_crtc->enabled) {
- dce_v11_0_vga_enable(crtc, true);
- amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
- dce_v11_0_vga_enable(crtc, false);
- }
- amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
- amdgpu_crtc->enabled = false;
- break;
- }
- /* adjust pm to dpms */
- amdgpu_pm_compute_clocks(adev);
-}
-
-static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc)
-{
- /* disable crtc pair power gating before programming */
- amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
- amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void dce_v11_0_crtc_commit(struct drm_crtc *crtc)
-{
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
- amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
-}
-
-static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_atom_ss ss;
- int i;
-
- dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->primary->fb) {
- int r;
- struct amdgpu_framebuffer *amdgpu_fb;
- struct amdgpu_bo *abo;
-
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r))
- DRM_ERROR("failed to reserve abo before unpin\n");
- else {
- amdgpu_bo_unpin(abo);
- amdgpu_bo_unreserve(abo);
- }
- }
- /* disable the GRPH */
- dce_v11_0_grph_enable(crtc, false);
-
- amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (adev->mode_info.crtcs[i] &&
- adev->mode_info.crtcs[i]->enabled &&
- i != amdgpu_crtc->crtc_id &&
- amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
- /* another crtc is using this pll, don't turn
- * off the pll
- */
- goto done;
- }
- }
-
- switch (amdgpu_crtc->pll_id) {
- case ATOM_PPLL0:
- case ATOM_PPLL1:
- case ATOM_PPLL2:
- /* disable the ppll */
- amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
- break;
- case ATOM_COMBOPHY_PLL0:
- case ATOM_COMBOPHY_PLL1:
- case ATOM_COMBOPHY_PLL2:
- case ATOM_COMBOPHY_PLL3:
- case ATOM_COMBOPHY_PLL4:
- case ATOM_COMBOPHY_PLL5:
- /* disable the ppll */
- amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
- 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
- break;
- default:
- break;
- }
-done:
- amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
- amdgpu_crtc->adjusted_clock = 0;
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
-}
-
-static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y, struct drm_framebuffer *old_fb)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
-
- if (!amdgpu_crtc->adjusted_clock)
- return -EINVAL;
-
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12)) {
- struct amdgpu_encoder *amdgpu_encoder =
- to_amdgpu_encoder(amdgpu_crtc->encoder);
- int encoder_mode =
- amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);
-
- /* SetPixelClock calculates the plls and ss values now */
- amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
- amdgpu_crtc->pll_id,
- encoder_mode, amdgpu_encoder->encoder_id,
- adjusted_mode->clock, 0, 0, 0, 0,
- amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
- } else {
- amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
- }
- amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
- dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
- amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
- amdgpu_atombios_crtc_scaler_setup(crtc);
- dce_v11_0_cursor_reset(crtc);
- /* update the hw version for dpm */
- amdgpu_crtc->hw_mode = *adjusted_mode;
-
- return 0;
-}
-
-static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
-
- /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- amdgpu_crtc->encoder = encoder;
- amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
- break;
- }
- }
- if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- return false;
- }
- if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
- return false;
- if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
- return false;
- /* pick pll */
- amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc);
- /* if we can't get a PPLL for a non-DP encoder, fail */
- if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
- !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
- return false;
-
- return true;
-}
-
-static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
-}
-
-static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
-{
- return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1);
-}
-
-static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
- .dpms = dce_v11_0_crtc_dpms,
- .mode_fixup = dce_v11_0_crtc_mode_fixup,
- .mode_set = dce_v11_0_crtc_mode_set,
- .mode_set_base = dce_v11_0_crtc_set_base,
- .mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic,
- .prepare = dce_v11_0_crtc_prepare,
- .commit = dce_v11_0_crtc_commit,
- .load_lut = dce_v11_0_crtc_load_lut,
- .disable = dce_v11_0_crtc_disable,
-};
-
-static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
-{
- struct amdgpu_crtc *amdgpu_crtc;
- int i;
-
- amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
- (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
- if (amdgpu_crtc == NULL)
- return -ENOMEM;
-
- drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
-
- drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
- amdgpu_crtc->crtc_id = index;
- adev->mode_info.crtcs[index] = amdgpu_crtc;
-
- amdgpu_crtc->max_cursor_width = 128;
- amdgpu_crtc->max_cursor_height = 128;
- adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
-
- for (i = 0; i < 256; i++) {
- amdgpu_crtc->lut_r[i] = i << 2;
- amdgpu_crtc->lut_g[i] = i << 2;
- amdgpu_crtc->lut_b[i] = i << 2;
- }
-
- switch (amdgpu_crtc->crtc_id) {
- case 0:
- default:
- amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
- break;
- case 1:
- amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
- break;
- case 2:
- amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
- break;
- case 3:
- amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
- break;
- case 4:
- amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
- break;
- case 5:
- amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
- break;
- }
-
- amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
- amdgpu_crtc->adjusted_clock = 0;
- amdgpu_crtc->encoder = NULL;
- amdgpu_crtc->connector = NULL;
- drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);
-
- return 0;
-}
-
-static int dce_v11_0_early_init(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg;
- adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;
-
- dce_v11_0_set_display_funcs(adev);
- dce_v11_0_set_irq_funcs(adev);
-
- adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
-
- switch (adev->asic_type) {
- case CHIP_CARRIZO:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 9;
- break;
- case CHIP_STONEY:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 9;
- break;
- case CHIP_POLARIS10:
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 6;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- adev->mode_info.num_hpd = 5;
- adev->mode_info.num_dig = 5;
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int dce_v11_0_sw_init(void *handle)
-{
- int r, i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
- if (r)
- return r;
- }
-
- for (i = 8; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
- if (r)
- return r;
- }
-
- /* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
- if (r)
- return r;
-
- adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
-
- adev->ddev->mode_config.async_page_flip = true;
-
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
-
- adev->ddev->mode_config.preferred_depth = 24;
- adev->ddev->mode_config.prefer_shadow = 1;
-
- adev->ddev->mode_config.fb_base = adev->mc.aper_base;
-
- r = amdgpu_modeset_create_props(adev);
- if (r)
- return r;
-
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
-
-
- /* allocate crtcs */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = dce_v11_0_crtc_init(adev, i);
- if (r)
- return r;
- }
-
- if (amdgpu_atombios_get_connector_info_from_object_table(adev))
- amdgpu_print_display_setup(adev->ddev);
- else
- return -EINVAL;
-
- /* setup afmt */
- r = dce_v11_0_afmt_init(adev);
- if (r)
- return r;
-
- r = dce_v11_0_audio_init(adev);
- if (r)
- return r;
-
- drm_kms_helper_poll_init(adev->ddev);
-
- adev->mode_info.mode_config_initialized = true;
- return 0;
-}
-
-static int dce_v11_0_sw_fini(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- kfree(adev->mode_info.bios_hardcoded_edid);
-
- drm_kms_helper_poll_fini(adev->ddev);
-
- dce_v11_0_audio_fini(adev);
-
- dce_v11_0_afmt_fini(adev);
-
- drm_mode_config_cleanup(adev->ddev);
- adev->mode_info.mode_config_initialized = false;
-
- return 0;
-}
-
-static int dce_v11_0_hw_init(void *handle)
-{
- int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dce_v11_0_init_golden_registers(adev);
-
- /* init dig PHYs, disp eng pll */
- amdgpu_atombios_crtc_powergate_init(adev);
- amdgpu_atombios_encoder_init_dig(adev);
- if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS12)) {
- amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
- DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
- amdgpu_atombios_crtc_set_dce_clock(adev, 0,
- DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
- } else {
- amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
- }
-
- /* initialize hpd */
- dce_v11_0_hpd_init(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- dce_v11_0_pageflip_interrupt_init(adev);
-
- return 0;
-}
-
-static int dce_v11_0_hw_fini(void *handle)
-{
- int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dce_v11_0_hpd_fini(adev);
-
- for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
- }
-
- dce_v11_0_pageflip_interrupt_fini(adev);
-
- return 0;
-}
-
-static int dce_v11_0_suspend(void *handle)
-{
- return dce_v11_0_hw_fini(handle);
-}
-
-static int dce_v11_0_resume(void *handle)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int ret;
-
- ret = dce_v11_0_hw_init(handle);
-
- /* turn on the BL */
- if (adev->mode_info.bl_encoder) {
- u8 bl_level = amdgpu_display_backlight_get_level(adev,
- adev->mode_info.bl_encoder);
- amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
- bl_level);
- }
-
- return ret;
-}
-
-static bool dce_v11_0_is_idle(void *handle)
-{
- return true;
-}
-
-static int dce_v11_0_wait_for_idle(void *handle)
-{
- return 0;
-}
-
-static int dce_v11_0_soft_reset(void *handle)
-{
- u32 srbm_soft_reset = 0, tmp;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (dce_v11_0_is_display_hung(adev))
- srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
-
- if (srbm_soft_reset) {
- tmp = RREG32(mmSRBM_SOFT_RESET);
- tmp |= srbm_soft_reset;
- dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- udelay(50);
-
- tmp &= ~srbm_soft_reset;
- WREG32(mmSRBM_SOFT_RESET, tmp);
- tmp = RREG32(mmSRBM_SOFT_RESET);
-
- /* Wait a little for things to settle down */
- udelay(50);
- }
- return 0;
-}
-
-static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- u32 lb_interrupt_mask;
-
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VBLANK_INTERRUPT_MASK, 0);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VBLANK_INTERRUPT_MASK, 1);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- default:
- break;
- }
-}
-
-static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
- int crtc,
- enum amdgpu_interrupt_state state)
-{
- u32 lb_interrupt_mask;
-
- if (crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VLINE_INTERRUPT_MASK, 0);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
- lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
- VLINE_INTERRUPT_MASK, 1);
- WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
- break;
- default:
- break;
- }
-}
-
-static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned hpd,
- enum amdgpu_interrupt_state state)
-{
- u32 tmp;
-
- if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
- return 0;
- }
-
- switch (state) {
- case AMDGPU_IRQ_STATE_DISABLE:
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
- break;
- case AMDGPU_IRQ_STATE_ENABLE:
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- switch (type) {
- case AMDGPU_CRTC_IRQ_VBLANK1:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK2:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK3:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK4:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK5:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state);
- break;
- case AMDGPU_CRTC_IRQ_VBLANK6:
- dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE1:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE2:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE3:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE4:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE5:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state);
- break;
- case AMDGPU_CRTC_IRQ_VLINE6:
- dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state);
- break;
- default:
- break;
- }
- return 0;
-}
-
-static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
- struct amdgpu_irq_src *src,
- unsigned type,
- enum amdgpu_interrupt_state state)
-{
- u32 reg;
-
- if (type >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", type);
- return -EINVAL;
- }
-
- reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
- if (state == AMDGPU_IRQ_STATE_DISABLE)
- WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
- reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
- else
- WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
- reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
-
- return 0;
-}
-
-static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned long flags;
- unsigned crtc_id;
- struct amdgpu_crtc *amdgpu_crtc;
- struct amdgpu_flip_work *works;
-
- crtc_id = (entry->src_id - 8) >> 1;
- amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
-
- if (crtc_id >= adev->mode_info.num_crtc) {
- DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
- return -EINVAL;
- }
-
- if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
- GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
- WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
- GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
-
- /* IRQ could occur when in initial stage */
- if(amdgpu_crtc == NULL)
- return 0;
-
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
- works = amdgpu_crtc->pflip_works;
- if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
- DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
- "AMDGPU_FLIP_SUBMITTED(%d)\n",
- amdgpu_crtc->pflip_status,
- AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- return 0;
- }
-
- /* page flip completed. clean up */
- amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
- amdgpu_crtc->pflip_works = NULL;
-
- /* wake up userspace */
- if(works->event)
- drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
-
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
-
- drm_crtc_vblank_put(&amdgpu_crtc->base);
- schedule_work(&works->unpin_work);
-
- return 0;
-}
-
-static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
- int hpd)
-{
- u32 tmp;
-
- if (hpd >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", hpd);
- return;
- }
-
- tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
- tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
- WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
-}
-
-static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- u32 tmp;
-
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
- tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
- WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
-}
-
-static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev,
- int crtc)
-{
- u32 tmp;
-
- if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
- DRM_DEBUG("invalid crtc %d\n", crtc);
- return;
- }
-
- tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
- tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
- WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
-}
-
-static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- unsigned crtc = entry->src_id - 1;
- uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
- unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
-
- switch (entry->src_data[0]) {
- case 0: /* vblank */
- if (disp_int & interrupt_status_offsets[crtc].vblank)
- dce_v11_0_crtc_vblank_int_ack(adev, crtc);
- else
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
- }
- DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
-
- break;
- case 1: /* vline */
- if (disp_int & interrupt_status_offsets[crtc].vline)
- dce_v11_0_crtc_vline_int_ack(adev, crtc);
- else
- DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
-
- DRM_DEBUG("IH: D%d vline\n", crtc + 1);
-
- break;
- default:
- DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
- break;
- }
-
- return 0;
-}
-
-static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
- struct amdgpu_irq_src *source,
- struct amdgpu_iv_entry *entry)
-{
- uint32_t disp_int, mask;
- unsigned hpd;
-
- if (entry->src_data[0] >= adev->mode_info.num_hpd) {
- DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
- return 0;
- }
-
- hpd = entry->src_data[0];
- disp_int = RREG32(interrupt_status_offsets[hpd].reg);
- mask = interrupt_status_offsets[hpd].hpd;
-
- if (disp_int & mask) {
- dce_v11_0_hpd_int_ack(adev, hpd);
- schedule_work(&adev->hotplug_work);
- DRM_DEBUG("IH: HPD%d\n", hpd + 1);
- }
-
- return 0;
-}
-
-static int dce_v11_0_set_clockgating_state(void *handle,
- enum amd_clockgating_state state)
-{
- return 0;
-}
-
-static int dce_v11_0_set_powergating_state(void *handle,
- enum amd_powergating_state state)
-{
- return 0;
-}
-
-static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
- .name = "dce_v11_0",
- .early_init = dce_v11_0_early_init,
- .late_init = NULL,
- .sw_init = dce_v11_0_sw_init,
- .sw_fini = dce_v11_0_sw_fini,
- .hw_init = dce_v11_0_hw_init,
- .hw_fini = dce_v11_0_hw_fini,
- .suspend = dce_v11_0_suspend,
- .resume = dce_v11_0_resume,
- .is_idle = dce_v11_0_is_idle,
- .wait_for_idle = dce_v11_0_wait_for_idle,
- .soft_reset = dce_v11_0_soft_reset,
- .set_clockgating_state = dce_v11_0_set_clockgating_state,
- .set_powergating_state = dce_v11_0_set_powergating_state,
-};
-
-static void
-dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
- amdgpu_encoder->pixel_clock = adjusted_mode->clock;
-
- /* need to call this here rather than in prepare() since we need some crtc info */
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- /* set scaler clears this on some chips */
- dce_v11_0_set_interleave(encoder->crtc, mode);
-
- if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
- dce_v11_0_afmt_enable(encoder, true);
- dce_v11_0_afmt_setmode(encoder, adjusted_mode);
- }
-}
-
-static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
-{
- struct amdgpu_device *adev = encoder->dev->dev_private;
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
-
- if ((amdgpu_encoder->active_device &
- (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
- (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
- ENCODER_OBJECT_ID_NONE)) {
- struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- if (dig) {
- dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder);
- if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
- dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
- }
- }
-
- amdgpu_atombios_scratch_regs_lock(adev, true);
-
- if (connector) {
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- /* select the clock/data port if it uses a router */
- if (amdgpu_connector->router.cd_valid)
- amdgpu_i2c_router_select_cd_port(amdgpu_connector);
-
- /* turn eDP panel on for mode set */
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- amdgpu_atombios_encoder_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_ON);
- }
-
- /* this is needed for the pll/ss setup to work correctly in some cases */
- amdgpu_atombios_encoder_set_crtc_source(encoder);
- /* set up the FMT blocks */
- dce_v11_0_program_fmt(encoder);
-}
-
-static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
-
- /* need to call this here as we need the crtc set up */
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
- amdgpu_atombios_scratch_regs_lock(adev, false);
-}
-
-static void dce_v11_0_encoder_disable(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- struct amdgpu_encoder_atom_dig *dig;
-
- amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-
- if (amdgpu_atombios_encoder_is_digital(encoder)) {
- if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
- dce_v11_0_afmt_enable(encoder, false);
- dig = amdgpu_encoder->enc_priv;
- dig->dig_encoder = -1;
- }
- amdgpu_encoder->active_device = 0;
-}
-
-/* these are handled by the primary encoders */
-static void dce_v11_0_ext_prepare(struct drm_encoder *encoder)
-{
-
-}
-
-static void dce_v11_0_ext_commit(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-dce_v11_0_ext_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
-}
-
-static void dce_v11_0_ext_disable(struct drm_encoder *encoder)
-{
-
-}
-
-static void
-dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode)
-{
-
-}
-
-static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = {
- .dpms = dce_v11_0_ext_dpms,
- .prepare = dce_v11_0_ext_prepare,
- .mode_set = dce_v11_0_ext_mode_set,
- .commit = dce_v11_0_ext_commit,
- .disable = dce_v11_0_ext_disable,
- /* no detect for TMDS/LVDS yet */
-};
-
-static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = {
- .dpms = amdgpu_atombios_encoder_dpms,
- .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
- .prepare = dce_v11_0_encoder_prepare,
- .mode_set = dce_v11_0_encoder_mode_set,
- .commit = dce_v11_0_encoder_commit,
- .disable = dce_v11_0_encoder_disable,
- .detect = amdgpu_atombios_encoder_dig_detect,
-};
-
-static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = {
- .dpms = amdgpu_atombios_encoder_dpms,
- .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
- .prepare = dce_v11_0_encoder_prepare,
- .mode_set = dce_v11_0_encoder_mode_set,
- .commit = dce_v11_0_encoder_commit,
- .detect = amdgpu_atombios_encoder_dac_detect,
-};
-
-static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder)
-{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
- kfree(amdgpu_encoder->enc_priv);
- drm_encoder_cleanup(encoder);
- kfree(amdgpu_encoder);
-}
-
-static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = {
- .destroy = dce_v11_0_encoder_destroy,
-};
-
-static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
- uint32_t encoder_enum,
- uint32_t supported_device,
- u16 caps)
-{
- struct drm_device *dev = adev->ddev;
- struct drm_encoder *encoder;
- struct amdgpu_encoder *amdgpu_encoder;
-
- /* see if we already added it */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- amdgpu_encoder = to_amdgpu_encoder(encoder);
- if (amdgpu_encoder->encoder_enum == encoder_enum) {
- amdgpu_encoder->devices |= supported_device;
- return;
- }
-
- }
-
- /* add a new one */
- amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
- if (!amdgpu_encoder)
- return;
-
- encoder = &amdgpu_encoder->base;
- switch (adev->mode_info.num_crtc) {
- case 1:
- encoder->possible_crtcs = 0x1;
- break;
- case 2:
- default:
- encoder->possible_crtcs = 0x3;
- break;
- case 3:
- encoder->possible_crtcs = 0x7;
- break;
- case 4:
- encoder->possible_crtcs = 0xf;
- break;
- case 5:
- encoder->possible_crtcs = 0x1f;
- break;
- case 6:
- encoder->possible_crtcs = 0x3f;
- break;
- }
-
- amdgpu_encoder->enc_priv = NULL;
-
- amdgpu_encoder->encoder_enum = encoder_enum;
- amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- amdgpu_encoder->devices = supported_device;
- amdgpu_encoder->rmx_type = RMX_OFF;
- amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
- amdgpu_encoder->is_ext_encoder = false;
- amdgpu_encoder->caps = caps;
-
- switch (amdgpu_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- amdgpu_encoder->rmx_type = RMX_FULL;
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
- } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
- } else {
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
- }
- drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
- break;
- case ENCODER_OBJECT_ID_SI170B:
- case ENCODER_OBJECT_ID_CH7303:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
- case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
- case ENCODER_OBJECT_ID_TITFP513:
- case ENCODER_OBJECT_ID_VT1623:
- case ENCODER_OBJECT_ID_HDMI_SI1930:
- case ENCODER_OBJECT_ID_TRAVIS:
- case ENCODER_OBJECT_ID_NUTMEG:
- /* these are handled by the primary encoders */
- amdgpu_encoder->is_ext_encoder = true;
- if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
- else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- else
- drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
- break;
- }
-}
-
-static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
- .set_vga_render_state = &dce_v11_0_set_vga_render_state,
- .bandwidth_update = &dce_v11_0_bandwidth_update,
- .vblank_get_counter = &dce_v11_0_vblank_get_counter,
- .vblank_wait = &dce_v11_0_vblank_wait,
- .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
- .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
- .hpd_sense = &dce_v11_0_hpd_sense,
- .hpd_set_polarity = &dce_v11_0_hpd_set_polarity,
- .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg,
- .page_flip = &dce_v11_0_page_flip,
- .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
- .add_encoder = &dce_v11_0_encoder_add,
- .add_connector = &amdgpu_connector_add,
- .stop_mc_access = &dce_v11_0_stop_mc_access,
- .resume_mc_access = &dce_v11_0_resume_mc_access,
-};
-
-static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
-{
- if (adev->mode_info.funcs == NULL)
- adev->mode_info.funcs = &dce_v11_0_display_funcs;
-}
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
- .set = dce_v11_0_set_crtc_irq_state,
- .process = dce_v11_0_crtc_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = {
- .set = dce_v11_0_set_pageflip_irq_state,
- .process = dce_v11_0_pageflip_irq,
-};
-
-static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = {
- .set = dce_v11_0_set_hpd_irq_state,
- .process = dce_v11_0_hpd_irq,
-};
-
-static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
-{
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
- adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs;
-
- adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
- adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs;
-
- adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
- adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
-}
-
-const struct amdgpu_ip_block_version dce_v11_0_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 0,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
-};
-
-const struct amdgpu_ip_block_version dce_v11_2_ip_block =
-{
- .type = AMD_IP_BLOCK_TYPE_DCE,
- .major = 11,
- .minor = 2,
- .rev = 0,
- .funcs = &dce_v11_0_ip_funcs,
-};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 1f3552967ba3..acc887a58518 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -20,7 +20,15 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+
+#include <linux/pci.h>
+
+#include <drm/drm_edid.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_vblank.h>
+
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
@@ -30,18 +38,27 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "amdgpu_display.h"
+
+#include "dce_v6_0.h"
+#include "sid.h"
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
+
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
+
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
+#include "gca/gfx_7_2_enum.h"
+
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
+
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
-#include "gca/gfx_7_2_enum.h"
+
#include "si_enums.h"
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev);
@@ -49,31 +66,31 @@ static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static const u32 crtc_offsets[6] =
{
- SI_CRTC0_REGISTER_OFFSET,
- SI_CRTC1_REGISTER_OFFSET,
- SI_CRTC2_REGISTER_OFFSET,
- SI_CRTC3_REGISTER_OFFSET,
- SI_CRTC4_REGISTER_OFFSET,
- SI_CRTC5_REGISTER_OFFSET
+ CRTC0_REGISTER_OFFSET,
+ CRTC1_REGISTER_OFFSET,
+ CRTC2_REGISTER_OFFSET,
+ CRTC3_REGISTER_OFFSET,
+ CRTC4_REGISTER_OFFSET,
+ CRTC5_REGISTER_OFFSET
};
static const u32 hpd_offsets[] =
{
- mmDC_HPD1_INT_STATUS - mmDC_HPD1_INT_STATUS,
- mmDC_HPD2_INT_STATUS - mmDC_HPD1_INT_STATUS,
- mmDC_HPD3_INT_STATUS - mmDC_HPD1_INT_STATUS,
- mmDC_HPD4_INT_STATUS - mmDC_HPD1_INT_STATUS,
- mmDC_HPD5_INT_STATUS - mmDC_HPD1_INT_STATUS,
- mmDC_HPD6_INT_STATUS - mmDC_HPD1_INT_STATUS,
+ HPD0_REGISTER_OFFSET,
+ HPD1_REGISTER_OFFSET,
+ HPD2_REGISTER_OFFSET,
+ HPD3_REGISTER_OFFSET,
+ HPD4_REGISTER_OFFSET,
+ HPD5_REGISTER_OFFSET
};
static const uint32_t dig_offsets[] = {
- SI_CRTC0_REGISTER_OFFSET,
- SI_CRTC1_REGISTER_OFFSET,
- SI_CRTC2_REGISTER_OFFSET,
- SI_CRTC3_REGISTER_OFFSET,
- SI_CRTC4_REGISTER_OFFSET,
- SI_CRTC5_REGISTER_OFFSET,
+ CRTC0_REGISTER_OFFSET,
+ CRTC1_REGISTER_OFFSET,
+ CRTC2_REGISTER_OFFSET,
+ CRTC3_REGISTER_OFFSET,
+ CRTC4_REGISTER_OFFSET,
+ CRTC5_REGISTER_OFFSET,
(0x13830 - 0x7030) >> 2,
};
@@ -118,72 +135,27 @@ static const struct {
static u32 dce_v6_0_audio_endpt_rreg(struct amdgpu_device *adev,
u32 block_offset, u32 reg)
{
- DRM_INFO("xxxx: dce_v6_0_audio_endpt_rreg ----no impl!!!!\n");
- return 0;
-}
-
-static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
- u32 block_offset, u32 reg, u32 v)
-{
- DRM_INFO("xxxx: dce_v6_0_audio_endpt_wreg ----no impl!!!!\n");
-}
-
-static bool dce_v6_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
-{
- if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) & CRTC_STATUS__CRTC_V_BLANK_MASK)
- return true;
- else
- return false;
-}
-
-static bool dce_v6_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
-{
- u32 pos1, pos2;
+ unsigned long flags;
+ u32 r;
- pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
- pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
+ WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+ r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
+ spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
- if (pos1 != pos2)
- return true;
- else
- return false;
+ return r;
}
-/**
- * dce_v6_0_wait_for_vblank - vblank wait asic callback.
- *
- * @crtc: crtc to wait for vblank on
- *
- * Wait for vblank on the requested crtc (evergreen+).
- */
-static void dce_v6_0_vblank_wait(struct amdgpu_device *adev, int crtc)
+static void dce_v6_0_audio_endpt_wreg(struct amdgpu_device *adev,
+ u32 block_offset, u32 reg, u32 v)
{
- unsigned i = 100;
+ unsigned long flags;
- if (crtc >= adev->mode_info.num_crtc)
- return;
-
- if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
- return;
-
- /* depending on when we hit vblank, we may be close to active; if so,
- * wait for another frame.
- */
- while (dce_v6_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v6_0_is_counter_moving(adev, crtc))
- break;
- }
- }
-
- while (!dce_v6_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v6_0_is_counter_moving(adev, crtc))
- break;
- }
- }
+ spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
+ WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset,
+ reg | AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_WRITE_EN_MASK);
+ WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+ spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}
static u32 dce_v6_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
@@ -218,6 +190,7 @@ static void dce_v6_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @crtc_id: crtc to cleanup pageflip on
* @crtc_base: new address of the crtc (GPU MC address)
+ * @async: asynchronous flip
*
* Does the actual pageflip (evergreen+).
* During vblank we take the crtc lock and wait for the update_pending
@@ -229,16 +202,20 @@ static void dce_v6_0_page_flip(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base, bool async)
{
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+ struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
/* flip at hsync for async, default is vsync */
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
+ /* update pitch */
+ WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
+ fb->pitches[0] / fb->format->cpp[0]);
/* update the scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
+ /* writing to the low address triggers the update */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
(u32)crtc_base);
-
/* post the write */
RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}
@@ -248,11 +225,11 @@ static int dce_v6_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
{
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
+
*vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
*position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
return 0;
-
}
/**
@@ -272,7 +249,8 @@ static bool dce_v6_0_hpd_sense(struct amdgpu_device *adev,
if (hpd >= adev->mode_info.num_hpd)
return connected;
- if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) & DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
+ if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
+ DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
connected = true;
return connected;
@@ -303,6 +281,21 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
+static void dce_v6_0_hpd_int_ack(struct amdgpu_device *adev,
+ int hpd)
+{
+ u32 tmp;
+
+ if (hpd >= adev->mode_info.num_hpd) {
+ DRM_DEBUG("invalid hpd %d\n", hpd);
+ return;
+ }
+
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+}
+
/**
* dce_v6_0_hpd_init - hpd setup callback.
*
@@ -313,11 +306,13 @@ static void dce_v6_0_hpd_set_polarity(struct amdgpu_device *adev,
*/
static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -340,10 +335,11 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
continue;
}
+ dce_v6_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
-
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -356,11 +352,13 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev)
*/
static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -368,10 +366,11 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev)
tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
- WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -379,115 +378,33 @@ static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
return mmDC_GPIO_HPD_A;
}
-static u32 evergreen_get_vblank_counter(struct amdgpu_device* adev, int crtc)
-{
- if (crtc >= adev->mode_info.num_crtc)
- return 0;
- else
- return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
-}
-
-static void dce_v6_0_stop_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
+static bool dce_v6_0_is_display_hung(struct amdgpu_device *adev)
{
- u32 crtc_enabled, tmp, frame_count;
- int i, j;
-
- save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
- /* disable VGA render */
- WREG32(mmVGA_RENDER_CONTROL, 0);
+ u32 crtc_hung = 0;
+ u32 crtc_status[6];
+ u32 i, j, tmp;
- /* blank the display controllers */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- crtc_enabled = RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK;
- if (crtc_enabled) {
- save->crtc_enabled[i] = true;
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
-
- if (!(tmp & CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK)) {
- dce_v6_0_vblank_wait(adev, i);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp |= CRTC_BLANK_CONTROL__CRTC_BLANK_DATA_EN_MASK;
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- }
- /* wait for the next frame */
- frame_count = evergreen_get_vblank_counter(adev, i);
- for (j = 0; j < adev->usec_timeout; j++) {
- if (evergreen_get_vblank_counter(adev, i) != frame_count)
- break;
- udelay(1);
- }
-
- /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp &= ~CRTC_CONTROL__CRTC_MASTER_EN_MASK;
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- save->crtc_enabled[i] = false;
- /* ***** */
- } else {
- save->crtc_enabled[i] = false;
+ if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
+ crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ crtc_hung |= (1 << i);
}
}
-}
-
-static void dce_v6_0_resume_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 tmp;
- int i, j;
-
- /* update crtc base addresses */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
- WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
- }
-
- WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
- WREG32(mmVGA_MEMORY_BASE_ADDRESS, (u32)adev->mc.vram_start);
- /* unlock regs and wait for update */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- if (save->crtc_enabled[i]) {
- tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
- if ((tmp & 0x7) != 0) {
- tmp &= ~0x7;
- WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if (tmp & GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK) {
- tmp &= ~GRPH_UPDATE__GRPH_UPDATE_LOCK_MASK;
- WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
- }
- tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
- if (tmp & 1) {
- tmp &= ~1;
- WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
- }
- for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
- if ((tmp & GRPH_UPDATE__GRPH_SURFACE_UPDATE_PENDING_MASK) == 0)
- break;
- udelay(1);
+ for (j = 0; j < 10; j++) {
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (crtc_hung & (1 << i)) {
+ tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
+ if (tmp != crtc_status[i])
+ crtc_hung &= ~(1 << i);
}
}
+ if (crtc_hung == 0)
+ return false;
+ udelay(100);
}
- /* Unlock vga access */
- WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
- mdelay(1);
- WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-
+ return true;
}
static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
@@ -495,27 +412,21 @@ static void dce_v6_0_set_vga_render_state(struct amdgpu_device *adev,
{
if (!render)
WREG32(mmVGA_RENDER_CONTROL,
- RREG32(mmVGA_RENDER_CONTROL) & VGA_VSTATUS_CNTL);
-
+ RREG32(mmVGA_RENDER_CONTROL) & ~VGA_RENDER_CONTROL__VGA_VSTATUS_CNTL_MASK);
}
static int dce_v6_0_get_num_crtc(struct amdgpu_device *adev)
{
- int num_crtc = 0;
-
switch (adev->asic_type) {
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
- num_crtc = 6;
- break;
+ return 6;
case CHIP_OLAND:
- num_crtc = 2;
- break;
+ return 2;
default:
- num_crtc = 0;
+ return 0;
}
- return num_crtc;
}
void dce_v6_0_disable_dce(struct amdgpu_device *adev)
@@ -544,9 +455,8 @@ void dce_v6_0_disable_dce(struct amdgpu_device *adev)
static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
{
-
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -600,7 +510,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
}
/**
- * cik_get_number_of_dram_channels - get the number of dram channels
+ * si_get_number_of_dram_channels - get the number of dram channels
*
* @adev: amdgpu_device pointer
*
@@ -983,8 +893,11 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
fixed20_12 a, b, c;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min_t(u32, line_time, 65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
@@ -1017,8 +930,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
wm_high.dram_channels = dram_channels;
wm_high.num_heads = num_heads;
- if (adev->pm.dpm_enabled) {
/* watermark for low clocks */
+ if (adev->pm.dpm_enabled) {
wm_low.yclk =
amdgpu_dpm_get_mclk(adev, true) * 10;
wm_low.sclk =
@@ -1045,9 +958,9 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
wm_low.num_heads = num_heads;
/* set for high clocks */
- latency_watermark_a = min(dce_v6_0_latency_watermark(&wm_high), (u32)65535);
+ latency_watermark_a = min_t(u32, dce_v6_0_latency_watermark(&wm_high), 65535);
/* set for low clocks */
- latency_watermark_b = min(dce_v6_0_latency_watermark(&wm_low), (u32)65535);
+ latency_watermark_b = min_t(u32, dce_v6_0_latency_watermark(&wm_low), 65535);
/* possibly force display priority to high */
/* should really do this at mode validation time... */
@@ -1098,16 +1011,16 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
/* select wm A */
arb_control3 = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
tmp = arb_control3;
- tmp &= ~LATENCY_WATERMARK_MASK(3);
- tmp |= LATENCY_WATERMARK_MASK(1);
+ tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
+ tmp |= (1 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
(line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
/* select wm B */
tmp = RREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset);
- tmp &= ~LATENCY_WATERMARK_MASK(3);
- tmp |= LATENCY_WATERMARK_MASK(2);
+ tmp &= ~(3 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
+ tmp |= (2 << DPG_PIPE_ARBITRATION_CONTROL3__URGENCY_WATERMARK_MASK__SHIFT);
WREG32(mmDPG_PIPE_ARBITRATION_CONTROL3 + amdgpu_crtc->crtc_offset, tmp);
WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
@@ -1121,13 +1034,26 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
/* watermark setup */
+/**
+ * dce_v6_0_line_buffer_adjust - Set up the line buffer
+ *
+ * @adev: amdgpu_device pointer
+ * @amdgpu_crtc: the selected display controller
+ * @mode: the current display mode on the selected display
+ * controller
+ * @other_mode: the display mode of another display controller
+ * that may be sharing the line buffer
+ *
+ * Set up the line buffer allocation for
+ * the selected display controller (SI).
+ * Returns the line buffer size in pixels.
+ */
static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
struct amdgpu_crtc *amdgpu_crtc,
struct drm_display_mode *mode,
@@ -1162,7 +1088,7 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
}
WREG32(mmDC_LB_MEMORY_SPLIT + amdgpu_crtc->crtc_offset,
- DC_LB_MEMORY_CONFIG(tmp));
+ (tmp << DC_LB_MEMORY_SPLIT__DC_LB_MEMORY_CONFIG__SHIFT));
WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
(buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
@@ -1189,7 +1115,6 @@ static u32 dce_v6_0_line_buffer_adjust(struct amdgpu_device *adev,
/**
- *
* dce_v6_0_bandwidth_update - program display watermarks
*
* @adev: amdgpu_device pointer
@@ -1207,7 +1132,7 @@ static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
if (!adev->mode_info.mode_config_initialized)
return;
- amdgpu_update_display_priority(adev);
+ amdgpu_display_update_priority(adev);
for (i = 0; i < adev->mode_info.num_crtc; i++) {
if (adev->mode_info.crtcs[i]->base.enabled)
@@ -1222,17 +1147,17 @@ static void dce_v6_0_bandwidth_update(struct amdgpu_device *adev)
dce_v6_0_program_watermarks(adev, adev->mode_info.crtcs[i+1], lb_size, num_heads);
}
}
-/*
+
static void dce_v6_0_audio_get_connected_pins(struct amdgpu_device *adev)
{
int i;
- u32 offset, tmp;
+ u32 tmp;
for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
- offset = adev->mode_info.audio.pin[i].offset;
- tmp = RREG32_AUDIO_ENDPT(offset,
- AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
- if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
+ tmp = RREG32_AUDIO_ENDPT(adev->mode_info.audio.pin[i].offset,
+ ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+ if (REG_GET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT,
+ PORT_CONNECTIVITY))
adev->mode_info.audio.pin[i].connected = false;
else
adev->mode_info.audio.pin[i].connected = true;
@@ -1254,102 +1179,612 @@ static struct amdgpu_audio_pin *dce_v6_0_audio_get_pin(struct amdgpu_device *ade
return NULL;
}
-static void dce_v6_0_afmt_audio_select_pin(struct drm_encoder *encoder)
+static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
- u32 offset;
if (!dig || !dig->afmt || !dig->afmt->pin)
return;
- offset = dig->afmt->offset;
-
- WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
- AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
-
+ WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
+ REG_SET_FIELD(0, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT,
+ dig->afmt->pin->id));
}
static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- DRM_INFO("xxxx: dce_v6_0_audio_write_latency_fields---no imp!!!!!\n");
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct amdgpu_connector *amdgpu_connector = NULL;
+ int interlace = 0;
+ u32 tmp;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ if (connector->encoder == encoder) {
+ amdgpu_connector = to_amdgpu_connector(connector);
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ if (!amdgpu_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ interlace = 1;
+
+ if (connector->latency_present[interlace]) {
+ tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ VIDEO_LIPSYNC, connector->video_latency[interlace]);
+ tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ AUDIO_LIPSYNC, connector->audio_latency[interlace]);
+ } else {
+ tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ VIDEO_LIPSYNC, 0);
+ tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
+ AUDIO_LIPSYNC, 0);
+ }
+ WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
+ ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
}
static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- DRM_INFO("xxxx: dce_v6_0_audio_write_speaker_allocation---no imp!!!!!\n");
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct amdgpu_connector *amdgpu_connector = NULL;
+ u8 *sadb = NULL;
+ int sad_count;
+ u32 tmp;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ if (connector->encoder == encoder) {
+ amdgpu_connector = to_amdgpu_connector(connector);
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ if (!amdgpu_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ sad_count = 0;
+ }
+
+ /* program the speaker allocation */
+ tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
+ ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ HDMI_CONNECTION, 0);
+ tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ DP_CONNECTION, 0);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
+ tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ DP_CONNECTION, 1);
+ else
+ tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ HDMI_CONNECTION, 1);
+
+ if (sad_count)
+ tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ SPEAKER_ALLOCATION, sadb[0]);
+ else
+ tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
+ SPEAKER_ALLOCATION, 5); /* stereo */
+
+ WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
+ ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+
+ kfree(sadb);
}
static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- DRM_INFO("xxxx: dce_v6_0_audio_write_sad_regs---no imp!!!!!\n");
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 offset;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct amdgpu_connector *amdgpu_connector = NULL;
+ struct cea_sad *sads;
+ int i, sad_count;
+
+ static const u16 eld_reg_to_type[][2] = {
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+ { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+ };
+ if (!dig || !dig->afmt || !dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ if (connector->encoder == encoder) {
+ amdgpu_connector = to_amdgpu_connector(connector);
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ if (!amdgpu_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
+ if (sad_count < 0)
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+ u32 value = 0;
+ u8 stereo_freqs = 0;
+ int max_channels = -1;
+ int j;
+
+ for (j = 0; j < sad_count; j++) {
+ struct cea_sad *sad = &sads[j];
+
+ if (sad->format == eld_reg_to_type[i][1]) {
+ if (sad->channels > max_channels) {
+ value = (sad->channels <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
+ (sad->byte2 <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
+ (sad->freq <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
+ max_channels = sad->channels;
+ }
+
+ if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+ stereo_freqs |= sad->freq;
+ else
+ break;
+ }
+ }
+
+ value |= (stereo_freqs <<
+ AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
+
+ WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
+ }
+
+ kfree(sads);
}
-*/
+
static void dce_v6_0_audio_enable(struct amdgpu_device *adev,
struct amdgpu_audio_pin *pin,
bool enable)
{
- DRM_INFO("xxxx: dce_v6_0_audio_enable---no imp!!!!!\n");
+ if (!pin)
+ return;
+
+ WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}
static const u32 pin_offsets[7] =
{
- (0x1780 - 0x1780),
- (0x1786 - 0x1780),
- (0x178c - 0x1780),
- (0x1792 - 0x1780),
- (0x1798 - 0x1780),
- (0x179d - 0x1780),
- (0x17a4 - 0x1780),
+ AUD0_REGISTER_OFFSET,
+ AUD1_REGISTER_OFFSET,
+ AUD2_REGISTER_OFFSET,
+ AUD3_REGISTER_OFFSET,
+ AUD4_REGISTER_OFFSET,
+ AUD5_REGISTER_OFFSET,
+ AUD6_REGISTER_OFFSET,
};
static int dce_v6_0_audio_init(struct amdgpu_device *adev)
{
+ int i;
+
+ if (!amdgpu_audio)
+ return 0;
+
+ adev->mode_info.audio.enabled = true;
+
+ switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ default:
+ adev->mode_info.audio.num_pins = 6;
+ break;
+ case CHIP_OLAND:
+ adev->mode_info.audio.num_pins = 2;
+ break;
+ }
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ adev->mode_info.audio.pin[i].channels = -1;
+ adev->mode_info.audio.pin[i].rate = -1;
+ adev->mode_info.audio.pin[i].bits_per_sample = -1;
+ adev->mode_info.audio.pin[i].status_bits = 0;
+ adev->mode_info.audio.pin[i].category_code = 0;
+ adev->mode_info.audio.pin[i].connected = false;
+ adev->mode_info.audio.pin[i].offset = pin_offsets[i];
+ adev->mode_info.audio.pin[i].id = i;
+ /* disable audio. it will be set up later */
+ /* XXX remove once we switch to ip funcs */
+ dce_v6_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
+ }
+
return 0;
}
static void dce_v6_0_audio_fini(struct amdgpu_device *adev)
{
+ if (!amdgpu_audio)
+ return;
+
+ if (!adev->mode_info.audio.enabled)
+ return;
+ adev->mode_info.audio.enabled = false;
}
-/*
-static void dce_v6_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
+static void dce_v6_0_audio_set_vbi_packet(struct drm_encoder *encoder)
{
- DRM_INFO("xxxx: dce_v6_0_afmt_update_ACR---no imp!!!!!\n");
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 tmp;
+
+ tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
+ tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1);
+ tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1);
+ WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);
}
-*/
-/*
- * build a HDMI Video Info Frame
- */
-/*
-static void dce_v6_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
- void *buffer, size_t size)
+
+static void dce_v6_0_audio_set_acr(struct drm_encoder *encoder,
+ uint32_t clock, int bpc)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 tmp;
+
+ tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
+ tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE,
+ bpc > 8 ? 0 : 1);
+ WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+ tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
+ WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
+ tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
+ WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
+
+ tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
+ WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
+ tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
+ WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
+
+ tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
+ WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
+ tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
+ WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
+}
+
+static void dce_v6_0_audio_set_avi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
{
- DRM_INFO("xxxx: dce_v6_0_afmt_update_avi_infoframe---no imp!!!!!\n");
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
+ struct hdmi_avi_infoframe frame;
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
+ uint8_t *payload = buffer + 3;
+ uint8_t *header = buffer;
+ ssize_t err;
+ u32 tmp;
+
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
+ if (err < 0) {
+ DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
+ if (err < 0) {
+ DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
+ return;
+ }
+
+ WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
+ payload[0x0] | (payload[0x1] << 8) | (payload[0x2] << 16) | (payload[0x3] << 24));
+ WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
+ payload[0x4] | (payload[0x5] << 8) | (payload[0x6] << 16) | (payload[0x7] << 24));
+ WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
+ payload[0x8] | (payload[0x9] << 8) | (payload[0xA] << 16) | (payload[0xB] << 24));
+ WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
+ payload[0xC] | (payload[0xD] << 8) | (header[1] << 24));
+
+ tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
+ /* anything other than 0 */
+ tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1,
+ HDMI_AUDIO_INFO_LINE, 2);
+ WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
}
static void dce_v6_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
- DRM_INFO("xxxx: dce_v6_0_audio_set_dto---no imp!!!!!\n");
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+ int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
+ u32 tmp;
+
+ /*
+ * Two dtos: generally use dto0 for hdmi, dto1 for dp.
+ * Express [24MHz / target pixel clock] as an exact rational
+ * number (ratio of two integers): DCCG_AUDIO_DTOx_PHASE
+ * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
+ */
+ tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
+ tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO0_SOURCE_SEL, amdgpu_crtc->crtc_id);
+ if (em == ATOM_ENCODER_MODE_HDMI) {
+ tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO_SEL, 0);
+ } else if (ENCODER_MODE_IS_DP(em)) {
+ tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE,
+ DCCG_AUDIO_DTO_SEL, 1);
+ }
+ WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
+ if (em == ATOM_ENCODER_MODE_HDMI) {
+ WREG32(mmDCCG_AUDIO_DTO0_PHASE, 24000);
+ WREG32(mmDCCG_AUDIO_DTO0_MODULE, clock);
+ } else if (ENCODER_MODE_IS_DP(em)) {
+ WREG32(mmDCCG_AUDIO_DTO1_PHASE, 24000);
+ WREG32(mmDCCG_AUDIO_DTO1_MODULE, clock);
+ }
+}
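The hunk above programs the audio DTO so that PHASE/MODULE equals 24 MHz divided by the target pixel clock. A minimal standalone sketch of that ratio (not part of the patch; the example clock value is assumed):

#include <stdio.h>

int main(void)
{
	/* Assumed example: 148500 kHz (1080p60) pixel clock. */
	unsigned int clock_khz = 148500;
	unsigned int phase = 24000;        /* numerator: 24 MHz expressed in kHz */
	unsigned int module = clock_khz;   /* denominator: pixel clock in kHz */

	/* The hardware divider tracks phase/module = 24 MHz / pixel clock. */
	printf("audio DTO ratio = %u/%u = %.6f\n",
	       phase, module, (double)phase / module);
	return 0;
}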
+
+static void dce_v6_0_audio_set_packet(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 tmp;
+
+ tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
+ WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
+
+ tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
+ WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);
+
+ tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
+ WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);
+
+ tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
+ tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
+ tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
+ tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
+ tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
+ tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
+ WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);
+
+ tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, 0xff);
+ WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, tmp);
+
+ tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
+ tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
+ WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+ tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_RESET_FIFO_WHEN_AUDIO_DIS, 1);
+ tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
+ WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
}
-*/
-/*
- * update the info frames with the data from the current display mode
- */
+
+static void dce_v6_0_audio_set_mute(struct drm_encoder *encoder, bool mute)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 tmp;
+
+ tmp = RREG32(mmHDMI_GC + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, HDMI_GC, HDMI_GC_AVMUTE, mute ? 1 : 0);
+ WREG32(mmHDMI_GC + dig->afmt->offset, tmp);
+}
+
+static void dce_v6_0_audio_hdmi_enable(struct drm_encoder *encoder, bool enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 tmp;
+
+ if (enable) {
+ tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
+ tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
+ tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
+ tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
+ WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
+
+ tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
+ WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);
+
+ tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
+ WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
+ } else {
+ tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 0);
+ tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 0);
+ tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 0);
+ tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 0);
+ WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);
+
+ tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 0);
+ WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
+ }
+}
+
+static void dce_v6_0_audio_dp_enable(struct drm_encoder *encoder, bool enable)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ u32 tmp;
+
+ if (enable) {
+ tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
+ WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);
+
+ tmp = RREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, 1);
+ WREG32(mmDP_SEC_TIMESTAMP + dig->afmt->offset, tmp);
+
+ tmp = RREG32(mmDP_SEC_CNTL + dig->afmt->offset);
+ tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1);
+ tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_ATP_ENABLE, 1);
+ tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_AIP_ENABLE, 1);
+ tmp = REG_SET_FIELD(tmp, DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1);
+ WREG32(mmDP_SEC_CNTL + dig->afmt->offset, tmp);
+ } else {
+ WREG32(mmDP_SEC_CNTL + dig->afmt->offset, 0);
+ }
+}
+
static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- DRM_INFO("xxxx: dce_v6_0_afmt_setmode ----no impl !!!!!!!!\n");
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct amdgpu_connector *amdgpu_connector = NULL;
+ int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
+ int bpc = 8;
+
+ if (!dig || !dig->afmt)
+ return;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ if (connector->encoder == encoder) {
+ amdgpu_connector = to_amdgpu_connector(connector);
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ if (!amdgpu_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ if (!dig->afmt->enabled)
+ return;
+
+ dig->afmt->pin = dce_v6_0_audio_get_pin(adev);
+ if (!dig->afmt->pin)
+ return;
+
+ if (encoder->crtc) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
+ bpc = amdgpu_crtc->bpc;
+ }
+
+ /* disable audio before setting up hw */
+ dce_v6_0_audio_enable(adev, dig->afmt->pin, false);
+
+ dce_v6_0_audio_set_mute(encoder, true);
+ dce_v6_0_audio_write_speaker_allocation(encoder);
+ dce_v6_0_audio_write_sad_regs(encoder);
+ dce_v6_0_audio_write_latency_fields(encoder, mode);
+ if (em == ATOM_ENCODER_MODE_HDMI) {
+ dce_v6_0_audio_set_dto(encoder, mode->clock);
+ dce_v6_0_audio_set_vbi_packet(encoder);
+ dce_v6_0_audio_set_acr(encoder, mode->clock, bpc);
+ } else if (ENCODER_MODE_IS_DP(em)) {
+ dce_v6_0_audio_set_dto(encoder, adev->clock.default_dispclk * 10);
+ }
+ dce_v6_0_audio_set_packet(encoder);
+ dce_v6_0_audio_select_pin(encoder);
+ dce_v6_0_audio_set_avi_infoframe(encoder, mode);
+ dce_v6_0_audio_set_mute(encoder, false);
+ if (em == ATOM_ENCODER_MODE_HDMI) {
+ dce_v6_0_audio_hdmi_enable(encoder, 1);
+ } else if (ENCODER_MODE_IS_DP(em)) {
+ dce_v6_0_audio_dp_enable(encoder, 1);
+ }
+
+ /* enable audio after setting up hw */
+ dce_v6_0_audio_enable(adev, dig->afmt->pin, true);
}
static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1359,6 +1794,7 @@ static void dce_v6_0_afmt_enable(struct drm_encoder *encoder, bool enable)
/* Silent, r600_hdmi_enable will raise WARN for us */
if (enable && dig->afmt->enabled)
return;
+
if (!enable && !dig->afmt->enabled)
return;
@@ -1422,7 +1858,7 @@ static void dce_v6_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 vga_control;
vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1433,7 +1869,7 @@ static void dce_v6_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, enable ? 1 : 0);
}
@@ -1444,18 +1880,16 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_framebuffer *amdgpu_fb;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
uint64_t fb_location, tiling_flags;
uint32_t fb_format, fb_pitch_pixels, pipe_config;
- u32 fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_NONE);
+ u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1463,103 +1897,110 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
return 0;
}
- if (atomic) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
+ if (atomic)
target_fb = fb;
- } else {
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ else
target_fb = crtc->primary->fb;
- }
/* If atomic, assume fb object is pinned & idle & fenced and
* just update base pointers
*/
- obj = amdgpu_fb->obj;
+ obj = target_fb->obj[0];
abo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (!atomic) {
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
+ fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
switch (target_fb->format->format) {
case DRM_FORMAT_C8:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) |
- GRPH_FORMAT(GRPH_FORMAT_INDEXED));
+ fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
break;
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_ARGB4444:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB4444));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB1555));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_BGRA5551:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_BGRA5551));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_RGB565:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_16BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB565));
+ fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN16);
+ fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB8888));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_ARGB2101010));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_BGRA1010102:
- fb_format = (GRPH_DEPTH(GRPH_DEPTH_32BPP) |
- GRPH_FORMAT(GRPH_FORMAT_BGRA1010102));
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
#ifdef __BIG_ENDIAN
- fb_swap = GRPH_ENDIAN_SWAP(GRPH_ENDIAN_8IN32);
+ fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
#endif
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+ fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
+ (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
+#ifdef __BIG_ENDIAN
+ fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
+#endif
+ break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
@@ -1572,18 +2013,18 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
- fb_format |= GRPH_NUM_BANKS(num_banks);
- fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_2D_TILED_THIN1);
- fb_format |= GRPH_TILE_SPLIT(tile_split);
- fb_format |= GRPH_BANK_WIDTH(bankw);
- fb_format |= GRPH_BANK_HEIGHT(bankh);
- fb_format |= GRPH_MACRO_TILE_ASPECT(mtaspect);
+ fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
+ fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
+ fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
+ fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
+ fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
+ fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
- fb_format |= GRPH_ARRAY_MODE(GRPH_ARRAY_1D_TILED_THIN1);
+ fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
}
pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
- fb_format |= GRPH_PIPE_CONFIG(pipe_config);
+ fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
dce_v6_0_vga_enable(crtc, false);
@@ -1599,7 +2040,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
(u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
- (u32) fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
+ (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
@@ -1643,8 +2084,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
@@ -1663,36 +2103,36 @@ static void dce_v6_0_set_interleave(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset,
- INTERLEAVE_EN);
+ DATA_FORMAT__INTERLEAVE_EN_MASK);
else
WREG32(mmDATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
}
static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
{
-
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ u16 *r, *g, *b;
int i;
DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
- (0 << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
+ ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
+ (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
- (0 << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
+ ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
+ (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
@@ -1708,27 +2148,30 @@ static void dce_v6_0_crtc_load_lut(struct drm_crtc *crtc)
WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
for (i = 0; i < 256; i++) {
WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->lut_r[i] << 20) |
- (amdgpu_crtc->lut_g[i] << 10) |
- (amdgpu_crtc->lut_b[i] << 0));
+ ((*r++ & 0xffc0) << 14) |
+ ((*g++ & 0xffc0) << 4) |
+ (*b++ >> 6));
}
WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
- (0 << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
- ICON_DEGAMMA_MODE(0) |
- (0 << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
+ ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__ICON_DEGAMMA_MODE__SHIFT) |
+ (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
- (0 << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
+ ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
+ (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
- (0 << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
+ ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
+ (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
- ((0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
- (0 << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
+ ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
+ (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
/* XXX match this to the depth of the crtc fmt block, move to modeset? */
WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
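The LUT loop above packs each pair of 16-bit gamma entries into one 10:10:10 DC_LUT_30_COLOR word. A standalone sketch of that packing (not part of the patch; the sample channel values are assumed):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed example 16-bit gamma values for one LUT entry. */
	uint16_t r = 0xffff, g = 0x8000, b = 0x0040;

	/* Keep the top 10 bits of each channel and place them at
	 * bits 29:20 (red), 19:10 (green) and 9:0 (blue), matching the
	 * WREG32(mmDC_LUT_30_COLOR, ...) expression in the loop. */
	uint32_t entry = ((uint32_t)(r & 0xffc0) << 14) |
			 ((uint32_t)(g & 0xffc0) << 4) |
			 (uint32_t)(b >> 6);

	printf("DC_LUT_30_COLOR word = 0x%08x\n", entry);
	return 0;
}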
@@ -1774,7 +2217,7 @@ static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 pll_in_use;
int pll;
@@ -1803,7 +2246,7 @@ static u32 dce_v6_0_pick_pll(struct drm_crtc *crtc)
static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
{
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
uint32_t cur_lock;
@@ -1818,37 +2261,34 @@ static void dce_v6_0_lock_cursor(struct drm_crtc *crtc, bool lock)
static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
-
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
- (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
- (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
-
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(amdgpu_crtc->cursor_addr));
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
- CUR_CONTROL__CURSOR_EN_MASK |
- (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
- (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
-
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ CUR_CONTROL__CURSOR_EN_MASK |
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
int x, int y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
int xorigin = 0, yorigin = 0;
int w = amdgpu_crtc->cursor_width;
@@ -1925,17 +2365,19 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
- ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM, &amdgpu_crtc->cursor_addr);
+ aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
dce_v6_0_lock_cursor(crtc, true);
@@ -1967,7 +2409,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_unreference_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
@@ -1993,15 +2435,6 @@ static int dce_v6_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, uint32_t size,
struct drm_modeset_acquire_ctx *ctx)
{
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- int i;
-
- /* userspace palettes are always correct as is */
- for (i = 0; i < size; i++) {
- amdgpu_crtc->lut_r[i] = red[i] >> 6;
- amdgpu_crtc->lut_g[i] = green[i] >> 6;
- amdgpu_crtc->lut_b[i] = blue[i] >> 6;
- }
dce_v6_0_crtc_load_lut(crtc);
return 0;
@@ -2019,15 +2452,19 @@ static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
.cursor_set2 = dce_v6_0_crtc_cursor_set2,
.cursor_move = dce_v6_0_crtc_cursor_move,
.gamma_set = dce_v6_0_crtc_gamma_set,
- .set_config = amdgpu_crtc_set_config,
+ .set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v6_0_crtc_destroy,
- .page_flip_target = amdgpu_crtc_page_flip_target,
+ .page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
unsigned type;
@@ -2037,7 +2474,8 @@ static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
/* Make sure VBLANK and PFLIP interrupts are still enabled */
- type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
+ type = amdgpu_display_crtc_idx_to_irq_type(adev,
+ amdgpu_crtc->crtc_id);
amdgpu_irq_update(adev, &adev->crtc_irq, type);
amdgpu_irq_update(adev, &adev->pageflip_irq, type);
drm_crtc_vblank_on(crtc);
@@ -2054,7 +2492,7 @@ static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
break;
}
/* adjust pm to dpms */
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_dpm_compute_clocks(adev);
}
static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc)
@@ -2076,18 +2514,16 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_atom_ss ss;
int i;
dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
if (crtc->primary->fb) {
int r;
- struct amdgpu_framebuffer *amdgpu_fb;
struct amdgpu_bo *abo;
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r))
DRM_ERROR("failed to reserve abo before unpin\n");
@@ -2156,7 +2592,6 @@ static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_encoder *encoder;
@@ -2174,7 +2609,7 @@ static bool dce_v6_0_crtc_mode_fixup(struct drm_crtc *crtc,
amdgpu_crtc->connector = NULL;
return false;
}
- if (!amdgpu_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
+ if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
return false;
@@ -2198,7 +2633,7 @@ static int dce_v6_0_crtc_set_base_atomic(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, enum mode_set_atomic state)
{
- return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
+ return dce_v6_0_crtc_do_set_base(crtc, fb, x, y, 1);
}
static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
@@ -2209,21 +2644,46 @@ static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
.mode_set_base_atomic = dce_v6_0_crtc_set_base_atomic,
.prepare = dce_v6_0_crtc_prepare,
.commit = dce_v6_0_crtc_commit,
- .load_lut = dce_v6_0_crtc_load_lut,
.disable = dce_v6_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
+};
+
+static void dce_v6_0_panic_flush(struct drm_plane *plane)
+{
+ struct drm_framebuffer *fb;
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct amdgpu_device *adev;
+ uint32_t fb_format;
+
+ if (!plane->fb)
+ return;
+
+ fb = plane->fb;
+ amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
+ adev = drm_to_adev(fb->dev);
+
+ /* Disable DC tiling */
+ fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
+ fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
+ WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
+}
+
+static const struct drm_plane_helper_funcs dce_v6_0_drm_primary_plane_helper_funcs = {
+ .get_scanout_buffer = amdgpu_display_get_scanout_buffer,
+ .panic_flush = dce_v6_0_panic_flush,
};
static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
{
struct amdgpu_crtc *amdgpu_crtc;
- int i;
amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
(AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
if (amdgpu_crtc == NULL)
return -ENOMEM;
- drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
+ drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v6_0_crtc_funcs);
drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
amdgpu_crtc->crtc_id = index;
@@ -2231,14 +2691,8 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->max_cursor_width = CURSOR_WIDTH;
amdgpu_crtc->max_cursor_height = CURSOR_HEIGHT;
- adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
- adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
-
- for (i = 0; i < 256; i++) {
- amdgpu_crtc->lut_r[i] = i << 2;
- amdgpu_crtc->lut_g[i] = i << 2;
- amdgpu_crtc->lut_b[i] = i << 2;
- }
+ adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
+ adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
@@ -2247,19 +2701,19 @@ static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v6_0_crtc_helper_funcs);
+ drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v6_0_drm_primary_plane_helper_funcs);
return 0;
}
-static int dce_v6_0_early_init(void *handle)
+static int dce_v6_0_early_init(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
adev->audio_endpt_rreg = &dce_v6_0_audio_endpt_rreg;
adev->audio_endpt_wreg = &dce_v6_0_audio_endpt_wreg;
dce_v6_0_set_display_funcs(adev);
- dce_v6_0_set_irq_funcs(adev);
adev->mode_info.num_crtc = dce_v6_0_get_num_crtc(adev);
@@ -2278,48 +2732,49 @@ static int dce_v6_0_early_init(void *handle)
return -EINVAL;
}
+ dce_v6_0_set_irq_funcs(adev);
+
return 0;
}
-static int dce_v6_0_sw_init(void *handle)
+static int dce_v6_0_sw_init(struct amdgpu_ip_block *ip_block)
{
int r, i;
- bool ret;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
for (i = 0; i < adev->mode_info.num_crtc; i++) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
if (r)
return r;
}
for (i = 8; i < 20; i += 2) {
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
if (r)
return r;
}
/* HPD hotplug */
- r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
+ r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
if (r)
return r;
adev->mode_info.mode_config_initialized = true;
- adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
- adev->ddev->mode_config.async_page_flip = true;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
- adev->ddev->mode_config.preferred_depth = 24;
- adev->ddev->mode_config.prefer_shadow = 1;
- adev->ddev->mode_config.fb_base = adev->mc.aper_base;
+ adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
- r = amdgpu_modeset_create_props(adev);
+ r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
- adev->ddev->mode_config.max_width = 16384;
- adev->ddev->mode_config.max_height = 16384;
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
/* allocate crtcs */
for (i = 0; i < adev->mode_info.num_crtc; i++) {
@@ -2328,9 +2783,8 @@ static int dce_v6_0_sw_init(void *handle)
return r;
}
- ret = amdgpu_atombios_get_connector_info_from_object_table(adev);
- if (ret)
- amdgpu_print_display_setup(adev->ddev);
+ if (amdgpu_atombios_get_connector_info_from_object_table(adev))
+ amdgpu_display_print_display_setup(adev_to_drm(adev));
else
return -EINVAL;
@@ -2343,33 +2797,47 @@ static int dce_v6_0_sw_init(void *handle)
if (r)
return r;
- drm_kms_helper_poll_init(adev->ddev);
+ /* Disable vblank IRQs aggressively for power-saving */
+ /* XXX: can this be enabled for DC? */
+ adev_to_drm(adev)->vblank_disable_immediate = true;
+
+ r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
+ if (r)
+ return r;
+
+ /* Pre-DCE11 */
+ INIT_DELAYED_WORK(&adev->hotplug_work,
+ amdgpu_display_hotplug_work_func);
+
+ drm_kms_helper_poll_init(adev_to_drm(adev));
return r;
}
-static int dce_v6_0_sw_fini(void *handle)
+static int dce_v6_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
- kfree(adev->mode_info.bios_hardcoded_edid);
+ drm_edid_free(adev->mode_info.bios_hardcoded_edid);
- drm_kms_helper_poll_fini(adev->ddev);
+ drm_kms_helper_poll_fini(adev_to_drm(adev));
dce_v6_0_audio_fini(adev);
dce_v6_0_afmt_fini(adev);
- drm_mode_config_cleanup(adev->ddev);
+ drm_mode_config_cleanup(adev_to_drm(adev));
adev->mode_info.mode_config_initialized = false;
return 0;
}
-static int dce_v6_0_hw_init(void *handle)
+static int dce_v6_0_hw_init(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
+ /* disable vga render */
+ dce_v6_0_set_vga_render_state(adev, false);
/* init dig PHYs, disp eng pll */
amdgpu_atombios_encoder_init_dig(adev);
amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
@@ -2386,10 +2854,10 @@ static int dce_v6_0_hw_init(void *handle)
return 0;
}
-static int dce_v6_0_hw_fini(void *handle)
+static int dce_v6_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
dce_v6_0_hpd_fini(adev);
@@ -2399,20 +2867,34 @@ static int dce_v6_0_hw_fini(void *handle)
dce_v6_0_pageflip_interrupt_fini(adev);
+ flush_delayed_work(&adev->hotplug_work);
+
return 0;
}
-static int dce_v6_0_suspend(void *handle)
+static int dce_v6_0_suspend(struct amdgpu_ip_block *ip_block)
{
- return dce_v6_0_hw_fini(handle);
+ struct amdgpu_device *adev = ip_block->adev;
+ int r;
+
+ r = amdgpu_display_suspend_helper(adev);
+ if (r)
+ return r;
+ adev->mode_info.bl_level =
+ amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
+
+ return dce_v6_0_hw_fini(ip_block);
}
-static int dce_v6_0_resume(void *handle)
+static int dce_v6_0_resume(struct amdgpu_ip_block *ip_block)
{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = ip_block->adev;
int ret;
- ret = dce_v6_0_hw_init(handle);
+ amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
+ adev->mode_info.bl_level);
+
+ ret = dce_v6_0_hw_init(ip_block);
/* turn on the BL */
if (adev->mode_info.bl_encoder) {
@@ -2421,23 +2903,41 @@ static int dce_v6_0_resume(void *handle)
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
bl_level);
}
+ if (ret)
+ return ret;
- return ret;
+ return amdgpu_display_resume_helper(adev);
}
-static bool dce_v6_0_is_idle(void *handle)
+static bool dce_v6_0_is_idle(struct amdgpu_ip_block *ip_block)
{
return true;
}
-static int dce_v6_0_wait_for_idle(void *handle)
+static int dce_v6_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
- return 0;
-}
+ u32 srbm_soft_reset = 0, tmp;
+ struct amdgpu_device *adev = ip_block->adev;
-static int dce_v6_0_soft_reset(void *handle)
-{
- DRM_INFO("xxxx: dce_v6_0_soft_reset --- no impl!!\n");
+ if (dce_v6_0_is_display_hung(adev))
+ srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
+
+ if (srbm_soft_reset) {
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+ tmp |= srbm_soft_reset;
+ dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~srbm_soft_reset;
+ WREG32(mmSRBM_SOFT_RESET, tmp);
+ tmp = RREG32(mmSRBM_SOFT_RESET);
+
+ /* Wait a little for things to settle down */
+ udelay(50);
+ }
return 0;
}
@@ -2454,22 +2954,22 @@ static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
switch (crtc) {
case 0:
- reg_block = SI_CRTC0_REGISTER_OFFSET;
+ reg_block = CRTC0_REGISTER_OFFSET;
break;
case 1:
- reg_block = SI_CRTC1_REGISTER_OFFSET;
+ reg_block = CRTC1_REGISTER_OFFSET;
break;
case 2:
- reg_block = SI_CRTC2_REGISTER_OFFSET;
+ reg_block = CRTC2_REGISTER_OFFSET;
break;
case 3:
- reg_block = SI_CRTC3_REGISTER_OFFSET;
+ reg_block = CRTC3_REGISTER_OFFSET;
break;
case 4:
- reg_block = SI_CRTC4_REGISTER_OFFSET;
+ reg_block = CRTC4_REGISTER_OFFSET;
break;
case 5:
- reg_block = SI_CRTC5_REGISTER_OFFSET;
+ reg_block = CRTC5_REGISTER_OFFSET;
break;
default:
DRM_DEBUG("invalid crtc %d\n", crtc);
@@ -2479,12 +2979,12 @@ static void dce_v6_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
interrupt_mask = RREG32(mmINT_MASK + reg_block);
- interrupt_mask &= ~VBLANK_INT_MASK;
+ interrupt_mask &= ~INT_MASK__VBLANK_INT_MASK;
WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
case AMDGPU_IRQ_STATE_ENABLE:
interrupt_mask = RREG32(mmINT_MASK + reg_block);
- interrupt_mask |= VBLANK_INT_MASK;
+ interrupt_mask |= INT_MASK__VBLANK_INT_MASK;
WREG32(mmINT_MASK + reg_block, interrupt_mask);
break;
default:
@@ -2499,28 +2999,28 @@ static void dce_v6_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
}
-static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_hpd_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
- unsigned type,
+ unsigned hpd,
enum amdgpu_interrupt_state state)
{
u32 dc_hpd_int_cntl;
- if (type >= adev->mode_info.num_hpd) {
- DRM_DEBUG("invalid hdp %d\n", type);
+ if (hpd >= adev->mode_info.num_hpd) {
+ DRM_DEBUG("invalid hpd %d\n", hpd);
return 0;
}
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
- dc_hpd_int_cntl &= ~DC_HPDx_INT_EN;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
break;
case AMDGPU_IRQ_STATE_ENABLE:
- dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
- dc_hpd_int_cntl |= DC_HPDx_INT_EN;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
+ dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], dc_hpd_int_cntl);
break;
default:
break;
@@ -2529,7 +3029,7 @@ static int dce_v6_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-static int dce_v6_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_crtc_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -2583,23 +3083,24 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
{
unsigned crtc = entry->src_id - 1;
uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
- unsigned irq_type = amdgpu_crtc_idx_to_irq_type(adev, crtc);
+ unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
+ crtc);
switch (entry->src_data[0]) {
case 0: /* vblank */
if (disp_int & interrupt_status_offsets[crtc].vblank)
- WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_ACK);
+ WREG32(mmVBLANK_STATUS + crtc_offsets[crtc], VBLANK_STATUS__VBLANK_ACK_MASK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
if (amdgpu_irq_enabled(adev, source, irq_type)) {
- drm_handle_vblank(adev->ddev, crtc);
+ drm_handle_vblank(adev_to_drm(adev), crtc);
}
DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
break;
case 1: /* vline */
if (disp_int & interrupt_status_offsets[crtc].vline)
- WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_ACK);
+ WREG32(mmVLINE_STATUS + crtc_offsets[crtc], VLINE_STATUS__VLINE_ACK_MASK);
else
DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
@@ -2613,7 +3114,7 @@ static int dce_v6_0_crtc_irq(struct amdgpu_device *adev,
return 0;
}
-static int dce_v6_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
+static int dce_v6_0_set_pageflip_irq_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
@@ -2640,7 +3141,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- unsigned long flags;
+ unsigned long flags;
unsigned crtc_id;
struct amdgpu_crtc *amdgpu_crtc;
struct amdgpu_flip_work *works;
@@ -2662,14 +3163,14 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
if (amdgpu_crtc == NULL)
return 0;
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
works = amdgpu_crtc->pflip_works;
- if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
"AMDGPU_FLIP_SUBMITTED(%d)\n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
return 0;
}
@@ -2681,7 +3182,7 @@ static int dce_v6_0_pageflip_irq(struct amdgpu_device *adev,
if (works->event)
drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
drm_crtc_vblank_put(&amdgpu_crtc->base);
schedule_work(&works->unpin_work);
@@ -2693,7 +3194,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t disp_int, mask, tmp;
+ uint32_t disp_int, mask;
unsigned hpd;
if (entry->src_data[0] >= adev->mode_info.num_hpd) {
@@ -2706,24 +3207,21 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
mask = interrupt_status_offsets[hpd].hpd;
if (disp_int & mask) {
- tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
- tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
- WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
- schedule_work(&adev->hotplug_work);
- DRM_INFO("IH: HPD%d\n", hpd + 1);
+ dce_v6_0_hpd_int_ack(adev, hpd);
+ schedule_delayed_work(&adev->hotplug_work, 0);
+ DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
return 0;
-
}
-static int dce_v6_0_set_clockgating_state(void *handle,
+static int dce_v6_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
enum amd_clockgating_state state)
{
return 0;
}
-static int dce_v6_0_set_powergating_state(void *handle,
+static int dce_v6_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
enum amd_powergating_state state)
{
return 0;
@@ -2732,7 +3230,6 @@ static int dce_v6_0_set_powergating_state(void *handle,
static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.name = "dce_v6_0",
.early_init = dce_v6_0_early_init,
- .late_init = NULL,
.sw_init = dce_v6_0_sw_init,
.sw_fini = dce_v6_0_sw_fini,
.hw_init = dce_v6_0_hw_init,
@@ -2740,19 +3237,17 @@ static const struct amd_ip_funcs dce_v6_0_ip_funcs = {
.suspend = dce_v6_0_suspend,
.resume = dce_v6_0_resume,
.is_idle = dce_v6_0_is_idle,
- .wait_for_idle = dce_v6_0_wait_for_idle,
.soft_reset = dce_v6_0_soft_reset,
.set_clockgating_state = dce_v6_0_set_clockgating_state,
.set_powergating_state = dce_v6_0_set_powergating_state,
};
-static void
-dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
+static void dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
-
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
amdgpu_encoder->pixel_clock = adjusted_mode->clock;
@@ -2762,7 +3257,7 @@ dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
/* set scaler clears this on some chips */
dce_v6_0_set_interleave(encoder->crtc, mode);
- if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+ if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em)) {
dce_v6_0_afmt_enable(encoder, true);
dce_v6_0_afmt_setmode(encoder, adjusted_mode);
}
@@ -2770,8 +3265,7 @@ dce_v6_0_encoder_mode_set(struct drm_encoder *encoder,
static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
{
-
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -2810,9 +3304,8 @@ static void dce_v6_0_encoder_prepare(struct drm_encoder *encoder)
static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
{
-
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
/* need to call this here as we need the crtc set up */
amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -2821,14 +3314,14 @@ static void dce_v6_0_encoder_commit(struct drm_encoder *encoder)
static void dce_v6_0_encoder_disable(struct drm_encoder *encoder)
{
-
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig;
+ int em = amdgpu_atombios_encoder_get_encoder_mode(encoder);
amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
if (amdgpu_atombios_encoder_is_digital(encoder)) {
- if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+ if (em == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(em))
dce_v6_0_afmt_enable(encoder, false);
dig = amdgpu_encoder->enc_priv;
dig->dig_encoder = -1;
@@ -2847,8 +3340,7 @@ static void dce_v6_0_ext_commit(struct drm_encoder *encoder)
}
-static void
-dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
+static void dce_v6_0_ext_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
@@ -2860,8 +3352,7 @@ static void dce_v6_0_ext_disable(struct drm_encoder *encoder)
}
-static void
-dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
+static void dce_v6_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
}
@@ -2921,7 +3412,7 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
uint32_t supported_device,
u16 caps)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_encoder *encoder;
struct amdgpu_encoder *amdgpu_encoder;
@@ -2932,7 +3423,6 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
amdgpu_encoder->devices |= supported_device;
return;
}
-
}
/* add a new one */
@@ -3020,10 +3510,8 @@ static void dce_v6_0_encoder_add(struct amdgpu_device *adev,
}
static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
- .set_vga_render_state = &dce_v6_0_set_vga_render_state,
.bandwidth_update = &dce_v6_0_bandwidth_update,
.vblank_get_counter = &dce_v6_0_vblank_get_counter,
- .vblank_wait = &dce_v6_0_vblank_wait,
.backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
.backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
.hpd_sense = &dce_v6_0_hpd_sense,
@@ -3033,40 +3521,40 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = {
.page_flip_get_scanoutpos = &dce_v6_0_crtc_get_scanoutpos,
.add_encoder = &dce_v6_0_encoder_add,
.add_connector = &amdgpu_connector_add,
- .stop_mc_access = &dce_v6_0_stop_mc_access,
- .resume_mc_access = &dce_v6_0_resume_mc_access,
};
static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev)
{
- if (adev->mode_info.funcs == NULL)
- adev->mode_info.funcs = &dce_v6_0_display_funcs;
+ adev->mode_info.funcs = &dce_v6_0_display_funcs;
}
static const struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = {
- .set = dce_v6_0_set_crtc_interrupt_state,
+ .set = dce_v6_0_set_crtc_irq_state,
.process = dce_v6_0_crtc_irq,
};
static const struct amdgpu_irq_src_funcs dce_v6_0_pageflip_irq_funcs = {
- .set = dce_v6_0_set_pageflip_interrupt_state,
+ .set = dce_v6_0_set_pageflip_irq_state,
.process = dce_v6_0_pageflip_irq,
};
static const struct amdgpu_irq_src_funcs dce_v6_0_hpd_irq_funcs = {
- .set = dce_v6_0_set_hpd_interrupt_state,
+ .set = dce_v6_0_set_hpd_irq_state,
.process = dce_v6_0_hpd_irq,
};
static void dce_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_LAST;
+ if (adev->mode_info.num_crtc > 0)
+ adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
+ else
+ adev->crtc_irq.num_types = 0;
adev->crtc_irq.funcs = &dce_v6_0_crtc_irq_funcs;
- adev->pageflip_irq.num_types = AMDGPU_PAGEFLIP_IRQ_LAST;
+ adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
adev->pageflip_irq.funcs = &dce_v6_0_pageflip_irq_funcs;
- adev->hpd_irq.num_types = AMDGPU_HPD_LAST;
+ adev->hpd_irq.num_types = adev->mode_info.num_hpd;
adev->hpd_irq.funcs = &dce_v6_0_hpd_irq_funcs;
}
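
The hunks above all apply the same two conversions: the amd_ip_funcs callbacks now receive a struct amdgpu_ip_block * instead of an opaque void *handle, and the DRM device is reached through adev_to_drm(adev) rather than the removed adev->ddev field. A minimal sketch of the new callback shape, assuming only what these hunks show (the function name example_early_init is illustrative, not part of the patch):

static int example_early_init(struct amdgpu_ip_block *ip_block)
{
	/* the device now comes from the IP block, not a cast of a void pointer */
	struct amdgpu_device *adev = ip_block->adev;

	/* DRM state is reached through the adev_to_drm() helper, not adev->ddev */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

	return 0;
}

The same pattern repeats in the dce_v8_0.c changes that follow.
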
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3c558c170e5e..2ccd6aad8dd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -20,7 +20,13 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-#include "drmP.h"
+
+#include <drm/drm_edid.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_vblank.h>
+
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_i2c.h"
@@ -31,6 +37,7 @@
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include "amdgpu_connectors.h"
+#include "amdgpu_display.h"
#include "dce_v8_0.h"
#include "dce/dce_8_0_d.h"
@@ -47,8 +54,7 @@
static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);
-static const u32 crtc_offsets[6] =
-{
+static const u32 crtc_offsets[6] = {
CRTC0_REGISTER_OFFSET,
CRTC1_REGISTER_OFFSET,
CRTC2_REGISTER_OFFSET,
@@ -57,8 +63,7 @@ static const u32 crtc_offsets[6] =
CRTC5_REGISTER_OFFSET
};
-static const u32 hpd_offsets[] =
-{
+static const u32 hpd_offsets[] = {
HPD0_REGISTER_OFFSET,
HPD1_REGISTER_OFFSET,
HPD2_REGISTER_OFFSET,
@@ -140,66 +145,6 @@ static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
}
-static bool dce_v8_0_is_in_vblank(struct amdgpu_device *adev, int crtc)
-{
- if (RREG32(mmCRTC_STATUS + crtc_offsets[crtc]) &
- CRTC_V_BLANK_START_END__CRTC_V_BLANK_START_MASK)
- return true;
- else
- return false;
-}
-
-static bool dce_v8_0_is_counter_moving(struct amdgpu_device *adev, int crtc)
-{
- u32 pos1, pos2;
-
- pos1 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
- pos2 = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
-
- if (pos1 != pos2)
- return true;
- else
- return false;
-}
-
-/**
- * dce_v8_0_vblank_wait - vblank wait asic callback.
- *
- * @adev: amdgpu_device pointer
- * @crtc: crtc to wait for vblank on
- *
- * Wait for vblank on the requested crtc (evergreen+).
- */
-static void dce_v8_0_vblank_wait(struct amdgpu_device *adev, int crtc)
-{
- unsigned i = 100;
-
- if (crtc >= adev->mode_info.num_crtc)
- return;
-
- if (!(RREG32(mmCRTC_CONTROL + crtc_offsets[crtc]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK))
- return;
-
- /* depending on when we hit vblank, we may be close to active; if so,
- * wait for another frame.
- */
- while (dce_v8_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v8_0_is_counter_moving(adev, crtc))
- break;
- }
- }
-
- while (!dce_v8_0_is_in_vblank(adev, crtc)) {
- if (i++ == 100) {
- i = 0;
- if (!dce_v8_0_is_counter_moving(adev, crtc))
- break;
- }
- }
-}
-
static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
if (crtc >= adev->mode_info.num_crtc)
@@ -232,6 +177,7 @@ static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @crtc_id: crtc to cleanup pageflip on
* @crtc_base: new address of the crtc (GPU MC address)
+ * @async: asynchronous flip
*
* Triggers the actual pageflip by updating the primary
* surface base address.
@@ -240,10 +186,14 @@ static void dce_v8_0_page_flip(struct amdgpu_device *adev,
int crtc_id, u64 crtc_base, bool async)
{
struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
+ struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
/* flip at hsync for async, default is vsync */
WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
+ /* update pitch */
+ WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
+ fb->pitches[0] / fb->format->cpp[0]);
/* update the primary scanout addresses */
WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
upper_32_bits(crtc_base));
@@ -315,6 +265,21 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
}
+static void dce_v8_0_hpd_int_ack(struct amdgpu_device *adev,
+ int hpd)
+{
+ u32 tmp;
+
+ if (hpd >= adev->mode_info.num_hpd) {
+ DRM_DEBUG("invalid hpd %d\n", hpd);
+ return;
+ }
+
+ tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
+ tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
+ WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
+}
+
/**
* dce_v8_0_hpd_init - hpd setup callback.
*
@@ -325,11 +290,13 @@ static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
*/
static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -352,9 +319,11 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
continue;
}
+ dce_v8_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
/**
@@ -367,11 +336,13 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
*/
static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
{
- struct drm_device *dev = adev->ddev;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
u32 tmp;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
@@ -379,10 +350,11 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
- WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], 0);
+ WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
}
+ drm_connector_list_iter_end(&iter);
}
static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
@@ -419,81 +391,6 @@ static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
return true;
}
-static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 crtc_enabled, tmp;
- int i;
-
- save->vga_render_control = RREG32(mmVGA_RENDER_CONTROL);
- save->vga_hdp_control = RREG32(mmVGA_HDP_CONTROL);
-
- /* disable VGA render */
- tmp = RREG32(mmVGA_RENDER_CONTROL);
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32(mmVGA_RENDER_CONTROL, tmp);
-
- /* blank the display controllers */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
- CRTC_CONTROL, CRTC_MASTER_EN);
- if (crtc_enabled) {
-#if 1
- save->crtc_enabled[i] = true;
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
- /*it is correct only for RGB ; black is 0*/
- WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- }
- mdelay(20);
-#else
- /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
- tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
- WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
- WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
- save->crtc_enabled[i] = false;
- /* ***** */
-#endif
- } else {
- save->crtc_enabled[i] = false;
- }
- }
-}
-
-static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
- struct amdgpu_mode_mc_save *save)
-{
- u32 tmp;
- int i;
-
- /* update crtc base addresses */
- for (i = 0; i < adev->mode_info.num_crtc; i++) {
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
- upper_32_bits(adev->mc.vram_start));
- WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
- (u32)adev->mc.vram_start);
-
- if (save->crtc_enabled[i]) {
- tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
- tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
- WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- }
- mdelay(20);
- }
-
- WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
- WREG32(mmVGA_MEMORY_BASE_ADDRESS, lower_32_bits(adev->mc.vram_start));
-
- /* Unlock vga access */
- WREG32(mmVGA_HDP_CONTROL, save->vga_hdp_control);
- mdelay(1);
- WREG32(mmVGA_RENDER_CONTROL, save->vga_render_control);
-}
-
static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
bool render)
{
@@ -565,7 +462,7 @@ void dce_v8_0_disable_dce(struct amdgpu_device *adev)
static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1091,8 +988,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
- line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+ active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+ (u32)mode->clock);
+ line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+ (u32)mode->clock);
+ line_time = min_t(u32, line_time, 65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -1122,7 +1022,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
wm_high.num_heads = num_heads;
/* set for high clocks */
- latency_watermark_a = min(dce_v8_0_latency_watermark(&wm_high), (u32)65535);
+ latency_watermark_a = min_t(u32, dce_v8_0_latency_watermark(&wm_high), 65535);
/* possibly force display priority to high */
/* should really do this at mode validation time... */
@@ -1161,7 +1061,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
wm_low.num_heads = num_heads;
/* set for low clocks */
- latency_watermark_b = min(dce_v8_0_latency_watermark(&wm_low), (u32)65535);
+ latency_watermark_b = min_t(u32, dce_v8_0_latency_watermark(&wm_low), 65535);
/* possibly force display priority to high */
/* should really do this at mode validation time... */
@@ -1196,8 +1096,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
/* save values for DPM */
amdgpu_crtc->line_time = line_time;
- amdgpu_crtc->wm_high = latency_watermark_a;
- amdgpu_crtc->wm_low = latency_watermark_b;
+
/* Save number of lines the linebuffer leads before the scanout */
amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
@@ -1216,7 +1115,7 @@ static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
u32 num_heads = 0, lb_size;
int i;
- amdgpu_update_display_priority(adev);
+ amdgpu_display_update_priority(adev);
for (i = 0; i < adev->mode_info.num_crtc; i++) {
if (adev->mode_info.crtcs[i]->base.enabled)
@@ -1264,7 +1163,7 @@ static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *ade
static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(encoder->dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 offset;
@@ -1281,10 +1180,12 @@ static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 tmp = 0, offset;
@@ -1293,12 +1194,14 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
@@ -1338,10 +1241,12 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
u32 offset, tmp;
u8 *sadb = NULL;
@@ -1352,19 +1257,21 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
return;
}
- sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
+ sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
if (sad_count < 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
sad_count = 0;
@@ -1387,11 +1294,13 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
- struct amdgpu_device *adev = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
u32 offset;
struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
struct amdgpu_connector *amdgpu_connector = NULL;
struct cea_sad *sads;
int i, sad_count;
@@ -1416,23 +1325,25 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
offset = dig->afmt->pin->offset;
- list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
if (connector->encoder == encoder) {
amdgpu_connector = to_amdgpu_connector(connector);
break;
}
}
+ drm_connector_list_iter_end(&iter);
if (!amdgpu_connector) {
DRM_ERROR("Couldn't find encoder's connector\n");
return;
}
- sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
- if (sad_count <= 0) {
+ sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
+ if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ if (sad_count <= 0)
return;
- }
BUG_ON(!sads);
for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
@@ -1448,9 +1359,9 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
if (sad->channels > max_channels) {
value = (sad->channels <<
AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
- (sad->byte2 <<
+ (sad->byte2 <<
AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
- (sad->freq <<
+ (sad->freq <<
AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
max_channels = sad->channels;
}
@@ -1482,15 +1393,14 @@ static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
}
-static const u32 pin_offsets[7] =
-{
- (0x1780 - 0x1780),
- (0x1786 - 0x1780),
- (0x178c - 0x1780),
- (0x1792 - 0x1780),
- (0x1798 - 0x1780),
- (0x179d - 0x1780),
- (0x17a4 - 0x1780),
+static const u32 pin_offsets[7] = {
+ AUD0_REGISTER_OFFSET,
+ AUD1_REGISTER_OFFSET,
+ AUD2_REGISTER_OFFSET,
+ AUD3_REGISTER_OFFSET,
+ AUD4_REGISTER_OFFSET,
+ AUD5_REGISTER_OFFSET,
+ AUD6_REGISTER_OFFSET,
};
static int dce_v8_0_audio_init(struct amdgpu_device *adev)
@@ -1532,17 +1442,12 @@ static int dce_v8_0_audio_init(struct amdgpu_device *adev)
static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
{
- int i;
-
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
- for (i = 0; i < adev->mode_info.audio.num_pins; i++)
- dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
-
adev->mode_info.audio.enabled = false;
}
@@ -1552,7 +1457,7 @@ static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1575,7 +1480,7 @@ static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
void *buffer, size_t size)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
@@ -1595,7 +1500,7 @@ static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
@@ -1622,7 +1527,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
@@ -1747,7 +1652,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
dce_v8_0_audio_write_sad_regs(encoder);
dce_v8_0_audio_write_latency_fields(encoder, mode);
- err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
+ err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
if (err < 0) {
DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
return;
@@ -1784,7 +1689,7 @@ static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -1843,8 +1748,7 @@ static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
}
}
-static const u32 vga_control_regs[6] =
-{
+static const u32 vga_control_regs[6] = {
mmD1VGA_CONTROL,
mmD2VGA_CONTROL,
mmD3VGA_CONTROL,
@@ -1857,7 +1761,7 @@ static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
u32 vga_control;
vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
@@ -1871,7 +1775,7 @@ static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
if (enable)
WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
@@ -1885,8 +1789,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_framebuffer *amdgpu_fb;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct drm_framebuffer *target_fb;
struct drm_gem_object *obj;
struct amdgpu_bo *abo;
@@ -1897,7 +1800,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1905,32 +1807,29 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
return 0;
}
- if (atomic) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
+ if (atomic)
target_fb = fb;
- } else {
- amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
+ else
target_fb = crtc->primary->fb;
- }
/* If atomic, assume fb object is pinned & idle & fenced and
* just update base pointers
*/
- obj = amdgpu_fb->obj;
+ obj = target_fb->obj[0];
abo = gem_to_amdgpu_bo(obj);
r = amdgpu_bo_reserve(abo, false);
if (unlikely(r != 0))
return r;
- if (atomic) {
- fb_location = amdgpu_bo_gpu_offset(abo);
- } else {
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM, &fb_location);
+ if (!atomic) {
+ abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
if (unlikely(r != 0)) {
amdgpu_bo_unreserve(abo);
return -EINVAL;
}
}
+ fb_location = amdgpu_bo_gpu_offset(abo);
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
amdgpu_bo_unreserve(abo);
@@ -2001,9 +1900,19 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
bypass_lut = true;
break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
+ (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
+ fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
+ (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
+#ifdef __BIG_ENDIAN
+ fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
+#endif
+ break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
@@ -2087,8 +1996,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
if (!atomic && fb && fb != crtc->primary->fb) {
- amdgpu_fb = to_amdgpu_framebuffer(fb);
- abo = gem_to_amdgpu_bo(amdgpu_fb->obj);
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
r = amdgpu_bo_reserve(abo, true);
if (unlikely(r != 0))
return r;
@@ -2106,7 +2014,7 @@ static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
- struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);